
chore(tiering): Fixes (#3225)

Don't run offloading and stashing if we know ahead of time that there won't be enough space left.
Repository: https://github.com/dragonflydb/dragonfly.git
Author: Vladislav, committed 2024-07-16 10:30:21 +02:00 via GitHub
Parent: e1b03d605c
Commit: cdd8d50e70
3 changed files with 20 additions and 4 deletions
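
In essence, each affected call site now compares the disk allocator's current usage plus the size of the upcoming write(s) against the backing file's limit before doing any work. A minimal standalone sketch of that idea follows; the helper name and the numbers are illustrative only and are not part of the Dragonfly code:

#include <cstddef>

// Hypothetical helper illustrating the guard added in this commit: skip the
// operation when the expected writes would not fit into the backing file.
static bool HasDiskHeadroom(size_t allocated_bytes, size_t max_file_size,
                            size_t upcoming_write_bytes) {
  return allocated_bytes + upcoming_write_bytes < max_file_size;
}

// Example: with a 1 GiB file that already holds 1023 MiB, a single 4 KiB stash
// still fits, but a ~2 MiB batch of offloading writes does not:
//   HasDiskHeadroom(1023u << 20, 1u << 30, 4096)     -> true
//   HasDiskHeadroom(1023u << 20, 1u << 30, 2u << 20) -> false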

View file

@@ -412,9 +412,17 @@ TieredStats TieredStorage::GetStats() const {
 }
 
 void TieredStorage::RunOffloading(DbIndex dbid) {
+  const size_t kMaxIterations = 500;
+
   if (SliceSnapshot::IsSnaphotInProgress())
     return;
 
+  // Don't run offloading if there's only very little space left
+  auto disk_stats = op_manager_->GetStats().disk_stats;
+  if (disk_stats.allocated_bytes + kMaxIterations / 2 * tiering::kPageSize >
+      disk_stats.max_file_size)
+    return;
+
   auto cb = [this, dbid, tmp = std::string{}](PrimeIterator it) mutable {
     TryStash(dbid, it->first.GetSlice(&tmp), &it->second);
   };
@@ -429,12 +437,14 @@ void TieredStorage::RunOffloading(DbIndex dbid) {
     if (op_manager_->GetStats().pending_stash_cnt >= write_depth_limit_)
       break;
     offloading_cursor_ = table.TraverseBySegmentOrder(offloading_cursor_, cb);
-  } while (offloading_cursor_ != start_cursor && iterations++ < 500);
+  } while (offloading_cursor_ != start_cursor && iterations++ < kMaxIterations);
 }
 
 bool TieredStorage::ShouldStash(const PrimeValue& pv) const {
+  auto disk_stats = op_manager_->GetStats().disk_stats;
   return !pv.IsExternal() && !pv.HasIoPending() && pv.ObjType() == OBJ_STRING &&
-         pv.Size() >= kMinValueSize;
+         pv.Size() >= kMinValueSize &&
+         disk_stats.allocated_bytes + tiering::kPageSize + pv.Size() < disk_stats.max_file_size;
 }
 
 }  // namespace dfly
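
For a sense of scale of the RunOffloading guard above: assuming the tiering page size is 4 KiB (an assumption about the value of tiering::kPageSize, not quoted from this diff), an offloading pass is skipped unless roughly 1 MiB of backing-file space is still free. A small compile-time check of that arithmetic:

#include <cstddef>

// Illustrative constants; kPageSize is assumed to be 4 KiB here, and
// kMaxIterations matches the constant introduced in the hunk above.
constexpr size_t kPageSize = 4096;
constexpr size_t kMaxIterations = 500;

// Headroom required before an offloading pass starts: half of the maximum
// number of iterations, one page each.
constexpr size_t kOffloadHeadroom = kMaxIterations / 2 * kPageSize;
static_assert(kOffloadHeadroom == 1'024'000);  // roughly 1 MiB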

View file

@@ -11,6 +11,7 @@
 #include "io/io_buf.h"
 #include "server/error.h"
 #include "server/tiering/common.h"
+#include "server/tiering/external_alloc.h"
 #include "util/fibers/uring_proactor.h"
 
 using namespace ::dfly::tiering::literals;
@@ -172,7 +173,10 @@ std::error_code DiskStorage::Stash(io::Bytes bytes, io::Bytes footer, StashCb cb
     backing_file_->WriteFixedAsync(buf.bytes, offset, *buf.buf_idx, std::move(io_cb));
   else
     backing_file_->WriteAsync(buf.bytes, offset, std::move(io_cb));
-  if (alloc_.allocated_bytes() > (size_ * 0.85) && !grow_pending_) {
+
+  // Grow in advance if needed and possible
+  if (alloc_.allocated_bytes() > (size_ * 0.85) &&
+      size_ + ExternalAllocator::kExtAlignment < static_cast<size_t>(max_size_) && !grow_pending_) {
     auto ec = Grow(265_MB);
     LOG_IF(ERROR, ec) << "Could not call grow :" << ec.message();
     return ec;
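
The extra condition in the hunk above is a sanity check: growing ahead of demand only makes sense while the backing file can still be extended by at least one allocator alignment step without exceeding max_size_. A rough sketch of that decision, with made-up member stand-ins (ExternalAllocator::kExtAlignment's actual value lives in external_alloc.h and is not quoted here):

#include <cstddef>

// Illustrative stand-ins for the DiskStorage members involved; values and
// names are placeholders, only the shape of the condition matches the diff.
struct GrowDecision {
  size_t allocated = 0;      // alloc_.allocated_bytes()
  size_t file_size = 0;      // size_
  size_t max_file_size = 0;  // max_size_
  size_t ext_alignment = 0;  // ExternalAllocator::kExtAlignment (placeholder)
  bool grow_pending = false;

  bool ShouldGrowAhead() const {
    // Over 85% utilization, room left to grow, and no grow already in flight.
    return allocated > file_size * 0.85 &&
           file_size + ext_alignment < max_file_size && !grow_pending;
  }
};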
@@ -181,7 +185,8 @@ std::error_code DiskStorage::Stash(io::Bytes bytes, io::Bytes footer, StashCb cb
 }
 
 DiskStorage::Stats DiskStorage::GetStats() const {
-  return {alloc_.allocated_bytes(), alloc_.capacity(), heap_buf_alloc_cnt_, reg_buf_alloc_cnt_};
+  return {alloc_.allocated_bytes(), alloc_.capacity(), heap_buf_alloc_cnt_, reg_buf_alloc_cnt_,
+          static_cast<size_t>(max_size_)};
 }
 
 std::error_code DiskStorage::Grow(off_t grow_size) {

View file

@@ -22,6 +22,7 @@ class DiskStorage {
     size_t capacity_bytes = 0;
     uint64_t heap_buf_alloc_count = 0;
     uint64_t registered_buf_alloc_count = 0;
+    size_t max_file_size = 0;
   };
 
   using ReadCb = std::function<void(io::Result<std::string_view>)>;
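
The new Stats field is filled by the aggregate return in GetStats() above, so the initializer order has to match the declaration order. A self-contained mock for illustration; the leading allocated-bytes field sits above this hunk's context and is assumed here, everything else mirrors the header:

#include <cstddef>
#include <cstdint>

struct Stats {                  // mock of DiskStorage::Stats
  size_t allocated_bytes = 0;   // assumed first field, outside the hunk context
  size_t capacity_bytes = 0;
  uint64_t heap_buf_alloc_count = 0;
  uint64_t registered_buf_alloc_count = 0;
  size_t max_file_size = 0;     // added by this commit
};

// GetStats() now supplies all five values in declaration order, e.g.:
Stats example{/*allocated*/ 4096, /*capacity*/ 1u << 20,
              /*heap bufs*/ 2,    /*registered bufs*/ 7,
              /*max file*/ 1ull << 30};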