Commit 85f5015

fix(server): Don't apply conservative memory limit when loading/replicating

When loading a snapshot created by the same server configuration (memory and number of shards), we may still create a different dash table segment directory tree, because the tree shape depends on the order in which entries are inserted. Therefore, when loading data from a snapshot or from replication, the conservative memory checks might fail, as the new tree might have more segments. Because we don't want to fail loading a snapshot from the same server configuration, we disable these checks during loading and replication.

Signed-off-by: adi_holden <[email protected]>
1 parent 22eb76d · commit 85f5015
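In code terms, the fix reduces to a single predicate evaluated per insertion and handed to the eviction policy: apply the conservative checks only when the server is neither a replica nor in the LOADING state. A minimal sketch of that rule, with the inputs simplified to plain booleans (illustrative only; the real check in db_slice.cc below reads owner_->IsReplica() and the global server state):

// Minimal sketch of the decision rule introduced by this commit: conservative
// memory checks apply only when the server is neither loading a snapshot nor
// acting as a replica; otherwise insertions are rejected only once the hard
// max-memory limit is exceeded.
inline bool ApplyConservativeMemoryLimit(bool is_replica, bool is_loading) {
  return !is_replica && !is_loading;
}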

File tree

2 files changed: +23 / -5 lines

src/server/db_slice.cc

Lines changed: 21 additions & 5 deletions
@@ -88,9 +88,9 @@ class PrimeEvictionPolicy {
   static constexpr bool can_gc = true;
 
   PrimeEvictionPolicy(const DbContext& cntx, bool can_evict, ssize_t mem_budget, ssize_t soft_limit,
-                      DbSlice* db_slice)
+                      DbSlice* db_slice, bool apply_memory_limit)
       : db_slice_(db_slice), mem_budget_(mem_budget), soft_limit_(soft_limit), cntx_(cntx),
-        can_evict_(can_evict) {
+        can_evict_(can_evict), apply_memory_limit_(apply_memory_limit) {
   }
 
   // A hook function that is called every time a segment is full and requires splitting.
@@ -128,6 +128,7 @@ class PrimeEvictionPolicy {
   // unlike static constexpr can_evict, this parameter tells whether we can evict
   // items in runtime.
   const bool can_evict_;
+  const bool apply_memory_limit_;
 };
 
 class PrimeBumpPolicy {
@@ -139,7 +140,7 @@ class PrimeBumpPolicy {
 };
 
 bool PrimeEvictionPolicy::CanGrow(const PrimeTable& tbl) const {
-  if (mem_budget_ > soft_limit_)
+  if (!apply_memory_limit_ || mem_budget_ > soft_limit_)
     return true;
 
   DCHECK_LT(tbl.size(), tbl.capacity());
@@ -396,8 +397,23 @@ tuple<PrimeIterator, ExpireIterator, bool> DbSlice::AddOrFind2(const Context& cn
     }
   }
 
-  PrimeEvictionPolicy evp{cntx, (bool(caching_mode_) && !owner_->IsReplica()),
-                          int64_t(memory_budget_ - key.size()), ssize_t(soft_budget_limit_), this};
+  // In case we are loading from rdb file or replicating we want to disable conservative memory
+  // checks (inside PrimeEvictionPolicy::CanGrow) and reject insertions only after we pass max
+  // memory limit. When loading a snapshot created by the same server configuration (memory and
+  // number of shards) we will create a different dash table segment directory tree, because the
+  // tree shape is related to the order of entries insertion. Therefore when loading data from
+  // snapshot or from replication the conservative memory checks might fail as the new tree might
+  // have more segments. Because we dont want to fail loading a snapshot from the same server
+  // configuration we disable this checks on loading and replication.
+  bool apply_memory_limit =
+      !owner_->IsReplica() && !(ServerState::tlocal()->gstate() == GlobalState::LOADING);
+
+  PrimeEvictionPolicy evp{cntx,
+                          (bool(caching_mode_) && !owner_->IsReplica()),
+                          int64_t(memory_budget_ - key.size()),
+                          ssize_t(soft_budget_limit_),
+                          this,
+                          apply_memory_limit};
 
   // If we are over limit in non-cache scenario, just be conservative and throw.
   if (!caching_mode_ && evp.mem_budget() < 0) {
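Taken together, the patch threads one boolean from AddOrFind2 into PrimeEvictionPolicy and short-circuits the conservative check in CanGrow. A compilable standalone sketch of that gating, with simplified types (an illustration, not the actual Dragonfly classes; the conservative branch is not shown in the hunk above and is stubbed out here):

#include <sys/types.h>  // ssize_t

// Simplified gating: when apply_memory_limit is false (replica or RDB loading),
// CanGrow() always lets the dash table add segments; the soft-limit comparison
// and the conservative budget check only run on a normal primary.
struct EvictionPolicySketch {
  ssize_t mem_budget;
  ssize_t soft_limit;
  bool apply_memory_limit;

  bool CanGrow(size_t projected_table_bytes) const {
    if (!apply_memory_limit || mem_budget > soft_limit)
      return true;  // checks disabled, or enough headroom left
    // Placeholder for the conservative branch: grow only if the table after the
    // segment split would still fit in the remaining budget.
    return static_cast<ssize_t>(projected_table_bytes) <= mem_budget;
  }
};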

src/server/rdb_load.cc

Lines changed: 2 additions & 0 deletions
@@ -2289,6 +2289,8 @@ void RdbLoader::LoadItemsBuffer(DbIndex db_ind, const ItemsBuf& ib) {
 void RdbLoader::ResizeDb(size_t key_num, size_t expire_num) {
   DCHECK_LT(key_num, 1U << 31);
   DCHECK_LT(expire_num, 1U << 31);
+  // Note: To reserve space, it's necessary to allocate space at the shard level. We might
+  // load with different number of shards which makes database resizing unfeasible.
 }
 
 error_code RdbLoader::LoadKeyValPair(int type, ObjSettings* settings) {
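The comment added to ResizeDb explains why the body stays empty: any meaningful reservation would have to happen on each shard, and the snapshot may have been produced by a server with a different shard count, so the totals seen here do not translate directly into per-shard sizes. A purely hypothetical sketch of what shard-level reservation could look like (ReserveTablesSketch and the reserve_on_shard callback are illustrative names, not Dragonfly APIs):

#include <cstddef>
#include <functional>

// Hypothetical illustration only: splitting a database's totals across shards.
// The ceiling division below is just a rough guess at the eventual per-shard
// load, which is part of why the real loader skips pre-sizing rather than
// reserving space based on these numbers.
void ReserveTablesSketch(size_t key_num, size_t expire_num, size_t shard_count,
                         const std::function<void(size_t, size_t, size_t)>& reserve_on_shard) {
  size_t keys_per_shard = (key_num + shard_count - 1) / shard_count;
  size_t expires_per_shard = (expire_num + shard_count - 1) / shard_count;
  for (size_t sid = 0; sid < shard_count; ++sid) {
    // In a real server this callback would run on the shard's own thread.
    reserve_on_shard(sid, keys_per_shard, expires_per_shard);
  }
}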
