diff --git a/README.md b/README.md
index 7859d633a..e6f6f7bb9 100644
--- a/README.md
+++ b/README.md
@@ -324,7 +324,7 @@ API 2.0
 - [ ] PFCOUNT
 - [ ] PFMERGE
 
-Memchache API
+Memcache API
 - [X] set
 - [X] get
 - [X] replace
diff --git a/doc/memory_bgsave.tsv b/doc/memory_bgsave.tsv
index 195cf4ff6..2704c4c86 100644
--- a/doc/memory_bgsave.tsv
+++ b/doc/memory_bgsave.tsv
@@ -1,4 +1,4 @@
-Tiime	Dragonfly	Redis
+Time	Dragonfly	Redis
 4	4738531328	6819917824
 5	4738637824	6819917824
 6	4738658304	6819913728
diff --git a/src/core/dash.h b/src/core/dash.h
index e779e2494..a66212496 100644
--- a/src/core/dash.h
+++ b/src/core/dash.h
@@ -12,7 +12,7 @@ namespace dfly {
 
 // DASH: Dynamic And Scalable Hashing.
 // TODO: We could name it DACHE: Dynamic and Adaptive caCHE.
-// After all, we added additionally improvements we added as part of the dragonfly project,
+// After all, we added additional improvements we added as part of the dragonfly project,
 // that probably justify a right to choose our own name for this data structure.
 struct BasicDashPolicy {
   enum { kSlotNum = 12, kBucketNum = 64, kStashBucketNum = 2 };
diff --git a/src/core/dash_test.cc b/src/core/dash_test.cc
index 96beddc86..03212e0da 100644
--- a/src/core/dash_test.cc
+++ b/src/core/dash_test.cc
@@ -289,7 +289,7 @@ TEST_F(DashTest, SegmentFull) {
   uint32_t probe = segment_.GetBucket(0).GetProbe(true);
 
   EXPECT_EQ((1 << 12) - 1, busy);  // Size 12
-  EXPECT_EQ(539, probe);           // verified by running since the test is determenistic.
+  EXPECT_EQ(539, probe);           // verified by running since the test is deterministic.
 
   unsigned keys[12] = {8045, 8085, 8217, 8330, 8337, 8381, 8432, 8506, 8587, 8605, 8612, 8725};
   for (unsigned i = 0; i < 12; ++i) {
diff --git a/src/core/expire_period.h b/src/core/expire_period.h
index ac31d6944..6c0465e2f 100644
--- a/src/core/expire_period.h
+++ b/src/core/expire_period.h
@@ -26,7 +26,7 @@ class ExpirePeriod {
   }
 
   // generation id for the base of this duration.
-  // when we update the generation, we need to update the value as well accoring to this
+  // when we update the generation, we need to update the value as well according to this
   // logic:
   // new_val = (old_val + old_base) - new_base.
   unsigned generation_id() const {
diff --git a/src/core/flat_set.h b/src/core/flat_set.h
index bb39c3f60..9f3e08b14 100644
--- a/src/core/flat_set.h
+++ b/src/core/flat_set.h
@@ -13,7 +13,7 @@ namespace dfly {
 
 class FlatSet {
   struct Hasher {
-    using is_transparent = void;  // to allow heteregenous lookups.
+    using is_transparent = void;  // to allow heterogeneous lookups.
 
     size_t operator()(const CompactObj& o) const {
       return o.HashCode();
@@ -25,7 +25,7 @@ class FlatSet {
   };
 
   struct Eq {
-    using is_transparent = void;  // to allow heteregenous lookups.
+    using is_transparent = void;  // to allow heterogeneous lookups.
 
     bool operator()(const CompactObj& left, const CompactObj& right) const {
       return left == right;
diff --git a/src/server/server_family.cc b/src/server/server_family.cc
index c589dc68a..04222a8e3 100644
--- a/src/server/server_family.cc
+++ b/src/server/server_family.cc
@@ -232,7 +232,7 @@ void ServerFamily::Load(const std::string& load_path) {
 
 #if 0
   auto& pool = service_.proactor_pool();
-  // Deliberitely run on all I/O threads to update the state for non-shard threads as well.
+  // Deliberately run on all I/O threads to update the state for non-shard threads as well.
   pool.Await([&](ProactorBase*) {
     // TODO: There can be a bug where status is different.
     CHECK(ServerState::tlocal()->gstate() == GlobalState::IDLE);
diff --git a/src/server/string_family_test.cc b/src/server/string_family_test.cc
index c46192bcb..3734528e2 100644
--- a/src/server/string_family_test.cc
+++ b/src/server/string_family_test.cc
@@ -240,7 +240,7 @@ TEST_F(StringFamilyTest, SingleShard) {
 }
 
 TEST_F(StringFamilyTest, MSetIncr) {
-  /* serialzable orders
+  /* serializable orders
      init: x=z=0
 
      mset x=z=1
diff --git a/src/server/tiered_storage.cc b/src/server/tiered_storage.cc
index f97ed21c5..8cab2d2e0 100644
--- a/src/server/tiered_storage.cc
+++ b/src/server/tiered_storage.cc
@@ -76,7 +76,7 @@ struct TieredStorage::ActiveIoRequest {
     mi_free(block_ptr);
   }
 
-  bool CanAccomodate(size_t length) const {
+  bool CanAccommodate(size_t length) const {
     return batch_offs + length <= kBatchSize;
   }
 
@@ -337,7 +337,7 @@ void TieredStorage::FlushPending() {
     size_t item_size = it->second.Size();
     DCHECK_GT(item_size, 0u);
 
-    if (!active_req || !active_req->CanAccomodate(item_size)) {
+    if (!active_req || !active_req->CanAccommodate(item_size)) {
       if (active_req) {  // need to close
         // save the block asynchronously.
         ++submitted_io_writes_;
diff --git a/src/server/transaction.cc b/src/server/transaction.cc
index 1fd567c38..81a3c50b3 100644
--- a/src/server/transaction.cc
+++ b/src/server/transaction.cc
@@ -59,7 +59,7 @@ Transaction::~Transaction() {
 * There are 4 options that we consider here:
 * a. T spans a single shard and its not multi.
 *    unique_shard_id_ is predefined before the schedule() is called.
-*    In that case only a single thread will be scheduled and it will use shard_data[0] just becase
+*    In that case only a single thread will be scheduled and it will use shard_data[0] just because
 *    shard_data.size() = 1. Coordinator thread can access any data because there is a
 *    schedule barrier between InitByArgs and RunInShard/IsArmedInShard functions.
 * b. T spans multiple shards and its not multi
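
Side note, not part of the patch above: the expire_period.h hunk quotes the rebase rule new_val = (old_val + old_base) - new_base, which keeps the absolute expiry time unchanged when the stored base moves. A minimal, self-contained C++ sketch of that rule follows; the RebasedExpiry struct, its fields, and the Rebase helper are hypothetical illustrations, not Dragonfly's actual ExpirePeriod API.

    // Illustrative sketch only (assumed names, not Dragonfly code).
    #include <cassert>
    #include <cstdint>

    struct RebasedExpiry {
      uint64_t base;  // reference point the offset is measured from
      uint64_t val;   // offset from base; absolute expiry == base + val

      // Move to a new base while preserving the absolute expiry,
      // following new_val = (old_val + old_base) - new_base.
      void Rebase(uint64_t new_base) {
        val = (val + base) - new_base;
        base = new_base;
      }
    };

    int main() {
      RebasedExpiry e{1000, 250};       // absolute expiry at 1250
      e.Rebase(1100);                   // val becomes 150
      assert(e.base + e.val == 1250);   // absolute expiry is unchanged
      return 0;
    }

The sketch assumes new_base does not exceed the absolute expiry; otherwise the unsigned subtraction would wrap, which the one-line comment in the patch does not address.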