
Improving Readability r3 (#75)

Signed-off-by: Ryan Russell <git@ryanrussell.org>
Ryan Russell 2022-06-01 11:31:36 -05:00 committed by GitHub
parent d5f2c23922
commit a049128ab6
10 changed files with 12 additions and 12 deletions

@@ -324,7 +324,7 @@ API 2.0
 - [ ] PFCOUNT
 - [ ] PFMERGE
-Memchache API
+Memcache API
 - [X] set
 - [X] get
 - [X] replace

@@ -1,4 +1,4 @@
-Tiime Dragonfly Redis
+Time Dragonfly Redis
 4 4738531328 6819917824
 5 4738637824 6819917824
 6 4738658304 6819913728


@@ -12,7 +12,7 @@ namespace dfly {
 // DASH: Dynamic And Scalable Hashing.
 // TODO: We could name it DACHE: Dynamic and Adaptive caCHE.
-// After all, we added additionally improvements we added as part of the dragonfly project,
+// After all, we added additional improvements we added as part of the dragonfly project,
 // that probably justify a right to choose our own name for this data structure.
 struct BasicDashPolicy {
   enum { kSlotNum = 12, kBucketNum = 64, kStashBucketNum = 2 };
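
For scale, the policy constants in this hunk fix a segment's footprint: kBucketNum regular buckets plus kStashBucketNum stash buckets, each holding kSlotNum entries. A minimal sketch of the resulting capacity bound (the formula is an assumption based on the usual Dash segment layout, not something this commit spells out):

#include <cstddef>
#include <iostream>

// Constants copied from BasicDashPolicy above.
enum { kSlotNum = 12, kBucketNum = 64, kStashBucketNum = 2 };

// Hypothetical helper: upper bound on entries one segment can hold,
// assuming every slot of every regular and stash bucket is usable.
constexpr std::size_t SegmentCapacity() {
  return (kBucketNum + kStashBucketNum) * kSlotNum;
}

int main() {
  std::cout << "max entries per segment: " << SegmentCapacity() << '\n';  // prints 792
}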

@@ -289,7 +289,7 @@ TEST_F(DashTest, SegmentFull) {
   uint32_t probe = segment_.GetBucket(0).GetProbe(true);
   EXPECT_EQ((1 << 12) - 1, busy); // Size 12
-  EXPECT_EQ(539, probe); // verified by running since the test is determenistic.
+  EXPECT_EQ(539, probe); // verified by running since the test is deterministic.
   unsigned keys[12] = {8045, 8085, 8217, 8330, 8337, 8381, 8432, 8506, 8587, 8605, 8612, 8725};
   for (unsigned i = 0; i < 12; ++i) {

@@ -26,7 +26,7 @@ class ExpirePeriod {
   }
   // generation id for the base of this duration.
-  // when we update the generation, we need to update the value as well accoring to this
+  // when we update the generation, we need to update the value as well according to this
   // logic:
   // new_val = (old_val + old_base) - new_base.
   unsigned generation_id() const {
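
The formula fixed in that comment is a rebasing step: the stored value is relative to a base, and when the base moves the value shifts so the absolute expiry point stays put. A minimal sketch of the arithmetic (names and integer types are illustrative assumptions, not the class's real layout):

#include <cassert>
#include <cstdint>

// Hypothetical rebasing helper: old_val is stored relative to old_base;
// shifting to new_base keeps base + value (the absolute expiry) constant.
uint64_t Rebase(uint64_t old_val, uint64_t old_base, uint64_t new_base) {
  return (old_val + old_base) - new_base;
}

int main() {
  uint64_t old_base = 1000, old_val = 250;  // absolute expiry = 1250
  uint64_t new_base = 1100;
  uint64_t new_val = Rebase(old_val, old_base, new_base);
  assert(new_base + new_val == old_base + old_val);  // still 1250
}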

@@ -13,7 +13,7 @@ namespace dfly {
 class FlatSet {
   struct Hasher {
-    using is_transparent = void; // to allow heteregenous lookups.
+    using is_transparent = void; // to allow heterogeneous lookups.
     size_t operator()(const CompactObj& o) const {
       return o.HashCode();
@@ -25,7 +25,7 @@ class FlatSet {
   };
   struct Eq {
-    using is_transparent = void; // to allow heteregenous lookups.
+    using is_transparent = void; // to allow heterogeneous lookups.
     bool operator()(const CompactObj& left, const CompactObj& right) const {
       return left == right;
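
For readers unfamiliar with the idiom these two hunks touch: declaring is_transparent on both the hash and equality functors enables heterogeneous lookup, i.e. probing the container with a type other than the stored key, without constructing a temporary key first. A self-contained sketch with std::string keys (std::string_view stands in for CompactObj here; heterogeneous find on unordered containers requires C++20):

#include <cstddef>
#include <functional>
#include <iostream>
#include <string>
#include <string_view>
#include <unordered_set>

// Transparent hash and equality: is_transparent lets find() accept any
// argument type these functors can handle, not just the stored key type.
struct StrHash {
  using is_transparent = void;
  std::size_t operator()(std::string_view s) const {
    return std::hash<std::string_view>{}(s);
  }
};
struct StrEq {
  using is_transparent = void;
  bool operator()(std::string_view a, std::string_view b) const { return a == b; }
};

int main() {
  std::unordered_set<std::string, StrHash, StrEq> words{"foo", "bar"};
  std::string_view probe = "foo";
  // No std::string temporary is built for the probe.
  std::cout << (words.find(probe) != words.end()) << '\n';  // prints 1
}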

@@ -232,7 +232,7 @@ void ServerFamily::Load(const std::string& load_path) {
 #if 0
   auto& pool = service_.proactor_pool();
-  // Deliberitely run on all I/O threads to update the state for non-shard threads as well.
+  // Deliberately run on all I/O threads to update the state for non-shard threads as well.
   pool.Await([&](ProactorBase*) {
     // TODO: There can be a bug where status is different.
     CHECK(ServerState::tlocal()->gstate() == GlobalState::IDLE);
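
The (currently disabled) block above shows a blocking broadcast: run a callback once on every I/O thread and wait, so that thread-local state is refreshed even on threads that own no shard. A generic sketch of the same pattern with plain std::thread (AwaitOnAll and g_state are hypothetical stand-ins; the real pool.Await belongs to Dragonfly's proactor pool, whose API is not shown here):

#include <functional>
#include <thread>
#include <vector>

thread_local int g_state = 0;  // per-thread stand-in for ServerState::tlocal()->gstate()

// Hypothetical stand-in for pool.Await(): invoke fn once per worker
// thread and block until every invocation has finished.
void AwaitOnAll(unsigned num_threads, const std::function<void(unsigned)>& fn) {
  std::vector<std::thread> workers;
  workers.reserve(num_threads);
  for (unsigned i = 0; i < num_threads; ++i)
    workers.emplace_back(fn, i);
  for (auto& t : workers)
    t.join();  // barrier: the caller proceeds only after all threads ran the callback
}

int main() {
  AwaitOnAll(4, [](unsigned) { g_state = 1; });  // every thread updates its own copy
}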

@@ -240,7 +240,7 @@ TEST_F(StringFamilyTest, SingleShard) {
 }
 TEST_F(StringFamilyTest, MSetIncr) {
-  /* serialzable orders
+  /* serializable orders
      init: x=z=0
      mset x=z=1

@@ -76,7 +76,7 @@ struct TieredStorage::ActiveIoRequest {
     mi_free(block_ptr);
   }
-  bool CanAccomodate(size_t length) const {
+  bool CanAccommodate(size_t length) const {
     return batch_offs + length <= kBatchSize;
   }
@@ -337,7 +337,7 @@ void TieredStorage::FlushPending() {
   size_t item_size = it->second.Size();
   DCHECK_GT(item_size, 0u);
-  if (!active_req || !active_req->CanAccomodate(item_size)) {
+  if (!active_req || !active_req->CanAccommodate(item_size)) {
     if (active_req) { // need to close
       // save the block asynchronously.
       ++submitted_io_writes_;
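
Taken together, the two hunks outline a standard write-batching loop: items are appended into a fixed-size block, and when the next item no longer fits, the current block is sealed and submitted before a fresh one starts. A minimal sketch of that control flow (the struct names and the kBatchSize value are illustrative assumptions):

#include <cstddef>
#include <iostream>
#include <vector>

constexpr std::size_t kBatchSize = 4096;  // assumed block size

struct Batch {
  std::size_t offs = 0;  // bytes used so far, like batch_offs above
  bool CanAccommodate(std::size_t length) const { return offs + length <= kBatchSize; }
  void Append(std::size_t length) { offs += length; }
};

int main() {
  std::vector<std::size_t> items = {1500, 1500, 1500, 700, 4000};
  Batch current;
  Batch* active = nullptr;
  std::size_t submitted = 0;
  for (std::size_t sz : items) {
    if (!active || !active->CanAccommodate(sz)) {
      if (active) ++submitted;  // seal the full block and write it out
      current = Batch{};        // start a fresh block
      active = &current;
    }
    active->Append(sz);
  }
  if (active) ++submitted;  // flush the trailing partial block
  std::cout << "blocks submitted: " << submitted << '\n';  // prints 3
}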

@@ -59,7 +59,7 @@ Transaction::~Transaction() {
  * There are 4 options that we consider here:
  * a. T spans a single shard and its not multi.
  *    unique_shard_id_ is predefined before the schedule() is called.
- *    In that case only a single thread will be scheduled and it will use shard_data[0] just becase
+ *    In that case only a single thread will be scheduled and it will use shard_data[0] just because
  *    shard_data.size() = 1. Coordinator thread can access any data because there is a
  *    schedule barrier between InitByArgs and RunInShard/IsArmedInShard functions.
  * b. T spans multiple shards and its not multi
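
As a rough illustration of option (a): a single-shard transaction sizes its per-shard array to one, so index 0 is correct regardless of which shard id the transaction maps to. A sketch built only on the comment's reasoning, not Dragonfly's real types:

#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical per-shard bookkeeping, loosely modeled on shard_data.
struct ShardData {
  uint32_t arg_count = 0;
};

struct Txn {
  std::vector<ShardData> shard_data;
  // Single-shard transactions allocate one slot; multi-shard ones allocate
  // one slot per shard, indexed by shard id.
  Txn(bool single_shard, unsigned num_shards)
      : shard_data(single_shard ? 1 : num_shards) {}

  ShardData& DataForShard(unsigned shard_id) {
    // Mirrors the comment: with shard_data.size() == 1, slot 0 is used no
    // matter which shard the transaction actually runs on.
    return shard_data.size() == 1 ? shard_data[0] : shard_data[shard_id];
  }
};

int main() {
  Txn single(/*single_shard=*/true, /*num_shards=*/8);
  single.DataForShard(7).arg_count++;  // lands in shard_data[0]
  std::cout << single.shard_data[0].arg_count << '\n';  // prints 1
}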