
Add dash table

This commit is contained in:
Roman Gershman 2022-01-19 21:43:23 +02:00
parent 5923c22d99
commit ff9e13c1c5
4 changed files with 2245 additions and 0 deletions
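For orientation, a minimal usage sketch of the new table (illustrative only: UInt64Policy mirrors the policy defined in core/dash_test.cc below, and XXH3_64bits comes from the xxhash library, assuming a recent xxhash.h that exposes XXH3):

#include <xxhash.h>

#include "core/dash.h"

// Hypothetical example policy, mirroring UInt64Policy from core/dash_test.cc.
struct UInt64Policy : public dfly::BasicDashPolicy {
  static uint64_t HashFn(uint64_t v) {
    return XXH3_64bits(&v, sizeof(v));
  }
};

using Dash64 = dfly::DashTable<uint64_t, uint64_t, UInt64Policy>;

int main() {
  Dash64 dt;
  auto [it, inserted] = dt.Insert(42, 100);  // {iterator, true}: the key was new.
  dt.Insert(42, 7);                          // Duplicate key: returns {existing it, false}.
  auto found = dt.Find(42);
  return (found.is_valid() && found.value() == 100) ? 0 : 1;  // 0 on success.
}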

@@ -3,3 +3,4 @@ cxx_link(dfly_core base absl::flat_hash_map redis_lib)
cxx_test(dfly_core_test dfly_core LABELS DFLY)
cxx_test(compact_object_test dfly_core LABELS DFLY)
cxx_test(dash_test dfly_core LABELS DFLY)

core/dash.h Normal file

@@ -0,0 +1,571 @@
// Copyright 2021, Roman Gershman. All rights reserved.
// See LICENSE for licensing terms.
//
#pragma once
#include <memory_resource>
#include <vector>
#include "core/dash_internal.h"
namespace dfly {
struct BasicDashPolicy {
enum { kSlotNum = 12, kBucketNum = 64, kStashBucketNum = 2 };
template <typename U> static void DestroyValue(const U&) {
}
template <typename U> static void DestroyKey(const U&) {
}
template <typename U, typename V> static bool Equal(U&& u, V&& v) {
return u == v;
}
};
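// Note: a concrete policy must additionally provide a HashFn (see UInt64Policy in
// core/dash_test.cc); BasicDashPolicy supplies only the trivial pieces.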
template <typename _Key, typename _Value, typename Policy>
class DashTable : public detail::DashTableBase {
DashTable(const DashTable&) = delete;
DashTable& operator=(const DashTable&) = delete;
struct SegmentPolicy {
static constexpr unsigned NUM_SLOTS = Policy::kSlotNum;
static constexpr unsigned BUCKET_CNT = Policy::kBucketNum;
static constexpr unsigned STASH_BUCKET_NUM = Policy::kStashBucketNum;
};
using Base = detail::DashTableBase;
using SegmentType = detail::Segment<_Key, _Value, SegmentPolicy>;
using SegmentIterator = typename SegmentType::Iterator;
public:
using Key_t = _Key;
using Value_t = _Value;
static constexpr unsigned kLogicalBucketNum = Policy::kBucketNum;
static constexpr unsigned kPhysicalBucketNum = SegmentType::kTotalBuckets;
static constexpr unsigned kBucketSize = Policy::kSlotNum;
static constexpr double kTaxAmount = SegmentType::kTaxSize;
static constexpr size_t kSegBytes = sizeof(SegmentType);
static constexpr size_t kSegCapacity = SegmentType::capacity();
// If IsSingleBucket is true, the iterator iterates only over a single bucket.
template <bool IsConst, bool IsSingleBucket = false> class Iterator {
using Owner = std::conditional_t<IsConst, const DashTable, DashTable>;
Owner* owner_;
uint32_t seg_id_;
uint8_t bucket_id_;
uint8_t slot_id_;
friend class DashTable;
Iterator(Owner* me, uint32_t seg_id, uint8_t bid, uint8_t sid)
: owner_(me), seg_id_(seg_id), bucket_id_(bid), slot_id_(sid) {
}
void FindValid() {
if constexpr (IsSingleBucket) {
const auto& b = owner_->segment_[seg_id_]->GetBucket(bucket_id_);
uint32_t mask = b.GetBusy() >> slot_id_;
if (mask) {
int slot = __builtin_ctz(mask);
slot_id_ += slot;
return;
}
} else {
while (seg_id_ < owner_->segment_.size()) {
auto seg_it = owner_->segment_[seg_id_]->FindValidStartingFrom(bucket_id_, slot_id_);
if (seg_it.found()) {
bucket_id_ = seg_it.index;
slot_id_ = seg_it.slot;
return;
}
seg_id_ = owner_->NextSeg(seg_id_);
bucket_id_ = slot_id_ = 0;
}
}
owner_ = nullptr;
}
public:
using iterator_category = std::forward_iterator_tag;
using value_type = const Value_t;
using difference_type = std::ptrdiff_t;
using reference = value_type&;
using pointer = value_type*;
// Copy constructor from iterator to const_iterator.
template <bool TIsConst = IsConst, bool TIsSingleB,
typename std::enable_if<TIsConst>::type* = nullptr>
Iterator(const Iterator<!TIsConst, TIsSingleB>& other) noexcept
: owner_(other.owner_), seg_id_(other.seg_id_), bucket_id_(other.bucket_id_),
slot_id_(other.slot_id_) {
}
// Copy constructor from bucket_iterator to iterator.
template <bool TIsSingleB>
Iterator(const Iterator<IsConst, TIsSingleB>& other) noexcept
: owner_(other.owner_), seg_id_(other.seg_id_), bucket_id_(other.bucket_id_),
slot_id_(other.slot_id_) {
}
Iterator() : owner_(nullptr), seg_id_(0), bucket_id_(0), slot_id_(0) {
}
Iterator(const Iterator& other) = default;
Iterator(Iterator&& other) = default;
Iterator& operator=(const Iterator& other) = default;
Iterator& operator=(Iterator&& other) = default;
// pre-increment.
Iterator& operator++() {
++slot_id_;
FindValid();
return *this;
}
Iterator& operator+=(int delta) {
slot_id_ += delta;
FindValid();
return *this;
}
const Key_t& key() const {
return owner_->segment_[seg_id_]->Key(bucket_id_, slot_id_);
}
// Generally we should not expose this method, since hash tables must not allow changing the
// keys of their key-value pairs. However, we need it for AddOrFind semantics, so that we can
// implement it without allocations. Use this method with caution - it might break the hash table.
Key_t* mutable_key() {
return &owner_->segment_[seg_id_]->Key(bucket_id_, slot_id_);
}
typename std::conditional<IsConst, const Value_t&, Value_t&>::type value() const {
return owner_->segment_[seg_id_]->Value(bucket_id_, slot_id_);
}
pointer operator->() const {
return std::addressof(value());
}
reference operator*() const {
return value();
}
// Make it self-contained. Does not need container::end().
bool is_valid() const {
return owner_ != nullptr;
}
friend bool operator==(const Iterator& lhs, const Iterator& rhs) {
if (lhs.owner_ == nullptr && rhs.owner_ == nullptr)
return true;
return lhs.owner_ == rhs.owner_ && lhs.seg_id_ == rhs.seg_id_ &&
lhs.bucket_id_ == rhs.bucket_id_ && lhs.slot_id_ == rhs.slot_id_;
}
friend bool operator!=(const Iterator& lhs, const Iterator& rhs) {
return !(lhs == rhs);
}
// debug accessors.
unsigned bucket_id() const {
return bucket_id_;
}
unsigned slot_id() const {
return slot_id_;
}
unsigned segment_id() const {
return seg_id_;
}
};
using const_iterator = Iterator<true>;
using iterator = Iterator<false>;
using const_bucket_iterator = Iterator<true, true>;
using bucket_iterator = Iterator<false, true>;
struct EvictionCandidates {
bucket_iterator iter[2 + Policy::kStashBucketNum];
uint64_t key_hash; // key_hash of a key that we try to insert.
};
// EvictionCb is called when an insertion needs to evict items in a segment to make room for
// a new item.
using EvictionCb = std::function<unsigned(const EvictionCandidates&)>;
struct EvictionPolicy {
EvictionCb evict_cb;
size_t max_capacity = UINT64_MAX;
};
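// An illustrative eviction callback, sketched after the Eviction test in core/dash_test.cc
// (the names Dash64 and dt are assumed from that test):
//
//   Dash64::EvictionCb cb = [&dt](const Dash64::EvictionCandidates& cand) -> unsigned {
//     unsigned deleted = 0;
//     for (auto it = cand.iter[0]; it.is_valid(); ++it) {
//       dt.Erase(it);  // Evict an entry from the first candidate bucket.
//       ++deleted;
//     }
//     return deleted;  // A non-zero return tells Insert to retry.
//   };
//   Dash64::EvictionPolicy ev{cb, 1500};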
DashTable(size_t capacity_log = 1, const Policy& policy = Policy{},
std::pmr::memory_resource* mr = std::pmr::get_default_resource());
~DashTable();
void Reserve(size_t size);
// Returns true if the pair was inserted; false if the key is a duplicate.
template <typename U, typename V> std::pair<iterator, bool> Insert(U&& key, V&& value) {
return InsertInternal(std::forward<U>(key), std::forward<V>(value), EvictionPolicy{});
}
template <typename U, typename V>
std::pair<iterator, bool> Insert(U&& key, V&& value, const EvictionPolicy& ev) {
return InsertInternal(std::forward<U>(key), std::forward<V>(value), ev);
}
template <typename U> const_iterator Find(U&& key) const;
template <typename U> iterator Find(U&& key);
// The passed iterator must be valid.
void Erase(iterator it);
size_t Erase(const Key_t& k);
iterator begin() {
iterator it{this, 0, 0, 0};
it.FindValid();
return it;
}
const_iterator cbegin() const {
const_iterator it{this, 0, 0, 0};
it.FindValid();
return it;
}
iterator end() const {
return iterator{};
}
const_iterator cend() const {
return const_iterator{};
}
using Base::depth;
using Base::size;
using Base::unique_segments;
template <typename U> uint64_t DoHash(const U& k) const {
return policy_.HashFn(k);
}
// Flat memory usage (allocated) of the table, not including the memory allocated
// by the hosted objects.
size_t mem_usage() const {
return segment_.capacity() * sizeof(void*) + sizeof(SegmentType) * unique_segments_;
}
size_t bucket_count() const {
return unique_segments_ * SegmentType::capacity();
}
double load_factor() const {
return double(size()) / (SegmentType::capacity() * unique_segments());
}
// Traverses a single logical bucket in the table and calls cb(iterator) 0 or more times.
// If cursor=0, the traversal starts from the beginning; otherwise it continues from where it
// stopped. Returns 0 once the traversal reaches the end. Traverse iterates at bucket
// granularity: for each non-empty bucket it calls cb for every entry in the bucket before
// returning. Unlike the begin/end interface, Traverse is more stable under table mutations:
// if a key exists at the beginning of the traversal and stays in the table throughout, the
// traversal is guaranteed to eventually reach it, even when the table shrinks or grows.
template <typename Cb> uint64_t Traverse(uint64_t cursor, Cb&& cb);
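// An illustrative cursor loop (mirrors the Traverse test in core/dash_test.cc;
// `table` stands for any DashTable instance):
//
//   uint64_t cursor = 0;
//   do {
//     cursor = table.Traverse(cursor, [](const auto& it) { /* use it.key() */ });
//   } while (cursor != 0);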
// Takes an iterator pointing to an entry in a dash bucket and traverses all of the bucket's
// entries by calling cb(iterator) for every non-empty slot. The iteration covers a physical bucket.
template <typename Cb> void TraverseBucket(const_iterator it, Cb&& cb);
static const_bucket_iterator bucket_it(const_iterator it) {
return const_bucket_iterator{it.owner_, it.seg_id_, it.bucket_id_, 0};
}
void Clear();
private:
template <typename U, typename V>
std::pair<iterator, bool> InsertInternal(U&& key, V&& value, const EvictionPolicy& policy);
void IncreaseDepth(unsigned new_depth);
void Split(uint32_t seg_id);
template <typename Cb> void IterateUnique(Cb&& cb);
size_t NextSeg(size_t sid) const {
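// A segment with local depth d is pointed to by 2^(global_depth_ - d) consecutive
// directory entries, so the next unique segment starts delta entries later.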
size_t delta = (1u << (global_depth_ - segment_[sid]->local_depth()));
return sid + delta;
}
auto EqPred() const {
return [p = &policy_](const auto& a, const auto& b) -> bool { return p->Equal(a, b); };
}
Policy policy_;
std::pmr::vector<SegmentType*> segment_;
};
template <typename _Key, typename _Value, typename Policy>
DashTable<_Key, _Value, Policy>::DashTable(size_t capacity_log, const Policy& policy,
std::pmr::memory_resource* mr)
: Base(capacity_log), policy_(policy), segment_(mr) {
assert(capacity_log > 0u);
segment_.resize(unique_segments_);
std::pmr::polymorphic_allocator<SegmentType> pa(mr);
for (auto& ptr : segment_) {
ptr = pa.allocate(1);
pa.construct(ptr, global_depth_); // new SegmentType(global_depth_);
}
}
template <typename _Key, typename _Value, typename Policy>
DashTable<_Key, _Value, Policy>::~DashTable() {
Clear();
auto* resource = segment_.get_allocator().resource();
std::pmr::polymorphic_allocator<SegmentType> pa(resource);
IterateUnique([&](SegmentType* seg) {
pa.destroy(seg);
pa.deallocate(seg, 1);
return false;
});
}
template <typename _Key, typename _Value, typename Policy>
void DashTable<_Key, _Value, Policy>::Clear() {
auto cb = [this](SegmentType* seg) {
seg->TraverseAll([this, seg](const SegmentIterator& it) {
policy_.DestroyKey(seg->Key(it.index, it.slot));
policy_.DestroyValue(seg->Value(it.index, it.slot));
});
seg->Clear();
return false;
};
IterateUnique(cb);
size_ = 0;
}
template <typename _Key, typename _Value, typename Policy>
template <typename Cb>
void DashTable<_Key, _Value, Policy>::IterateUnique(Cb&& cb) {
size_t i = 0;
while (i < segment_.size()) {
auto* seg = segment_[i];
size_t next_id = NextSeg(i);
if (cb(seg))
break;
i = next_id;
}
}
template <typename _Key, typename _Value, typename Policy>
template <typename U>
auto DashTable<_Key, _Value, Policy>::Find(U&& key) const -> const_iterator {
uint64_t key_hash = DoHash(key);
size_t seg_id = SegmentId(key_hash);
const auto* target = segment_[seg_id];
auto seg_it = target->FindIt(key, key_hash, EqPred());
if (seg_it.found()) {
return const_iterator{this, seg_id, seg_it.index, seg_it.slot};
}
return const_iterator{};
}
template <typename _Key, typename _Value, typename Policy>
template <typename U>
auto DashTable<_Key, _Value, Policy>::Find(U&& key) -> iterator {
uint64_t key_hash = DoHash(key);
size_t x = SegmentId(key_hash);
const auto* target = segment_[x];
auto seg_it = target->FindIt(key, key_hash, EqPred());
if (seg_it.found()) {
return iterator{this, uint32_t(x), seg_it.index, seg_it.slot};
}
return iterator{};
}
template <typename _Key, typename _Value, typename Policy>
size_t DashTable<_Key, _Value, Policy>::Erase(const Key_t& key) {
uint64_t key_hash = DoHash(key);
size_t x = SegmentId(key_hash);
auto* target = segment_[x];
auto it = target->FindIt(key, key_hash, EqPred());
if (!it.found())
return 0;
policy_.DestroyKey(target->Key(it.index, it.slot));
policy_.DestroyValue(target->Value(it.index, it.slot));
target->Delete(it, key_hash);
return 1;
}
template <typename _Key, typename _Value, typename Policy>
void DashTable<_Key, _Value, Policy>::Erase(iterator it) {
auto* target = segment_[it.seg_id_];
uint64_t key_hash = DoHash(it.key());
SegmentIterator sit{it.bucket_id_, it.slot_id_};
policy_.DestroyKey(*it.mutable_key());
policy_.DestroyValue(it.value());
target->Delete(sit, key_hash);
}
template <typename _Key, typename _Value, typename Policy>
void DashTable<_Key, _Value, Policy>::Reserve(size_t size) {
if (size <= bucket_count())
return;
size_t sg_floor = (size - 1) / SegmentType::capacity();
if (sg_floor < segment_.size()) {
return;
}
assert(sg_floor > 1u);
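// 63 ^ __builtin_clzll(x) equals floor(log2(x)) for x >= 1, so new_depth is the smallest
// depth whose directory size exceeds sg_floor.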
unsigned new_depth = 1 + (63 ^ __builtin_clzll(sg_floor));
IncreaseDepth(new_depth);
}
template <typename _Key, typename _Value, typename Policy>
template <typename U, typename V>
auto DashTable<_Key, _Value, Policy>::InsertInternal(U&& key, V&& value, const EvictionPolicy& ev)
-> std::pair<iterator, bool> {
uint64_t key_hash = DoHash(key);
uint32_t seg_id = SegmentId(key_hash);
while (true) {
// SegmentId keeps the top global_depth_ (msb) bits of the hash.
assert(seg_id < segment_.size());
SegmentType* target = segment_[seg_id];
auto [it, res] =
target->Insert(std::forward<U>(key), std::forward<V>(value), key_hash, EqPred());
if (res) { // success
++size_;
return std::make_pair(iterator{this, seg_id, it.index, it.slot}, true);
}
/* Insertion failure: if the iterator was found, the key is a duplicate. */
if (it.found()) {
return std::make_pair(iterator{this, seg_id, it.index, it.slot}, false);
}
// We need to resize the table, but first check whether we need to trigger the
// eviction policy.
if (SegmentType::capacity() + bucket_count() > ev.max_capacity) {
if (ev.evict_cb) {
// Try eviction.
uint8_t bid[2];
SegmentType::FillProbeArray(key_hash, bid);
EvictionCandidates candidates;
candidates.key_hash = key_hash;
candidates.iter[0] = bucket_iterator{this, seg_id, bid[0], 0};
candidates.iter[1] = bucket_iterator{this, seg_id, bid[1], 0};
for (unsigned i = 0; i < Policy::kStashBucketNum; ++i) {
candidates.iter[2 + i] = bucket_iterator{this, seg_id, uint8_t(kLogicalBucketNum + i), 0};
}
unsigned deleted = ev.evict_cb(candidates);
if (deleted) {
continue; // Eviction succeeded - retry the insertion.
}
}
break; // Stop - we cannot grow beyond max_capacity.
}
// Split the segment.
if (target->local_depth() == global_depth_) {
IncreaseDepth(global_depth_ + 1);
seg_id = SegmentId(key_hash);
assert(seg_id < segment_.size() && segment_[seg_id] == target);
}
Split(seg_id);
}
return std::make_pair(iterator{}, false);
}
template <typename _Key, typename _Value, typename Policy>
void DashTable<_Key, _Value, Policy>::IncreaseDepth(unsigned new_depth) {
assert(!segment_.empty());
assert(new_depth > global_depth_);
size_t prev_sz = segment_.size();
size_t repl_cnt = 1ul << (new_depth - global_depth_);
segment_.resize(1ul << new_depth);
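// Walk back to front, replicating each old directory entry repl_cnt times, so that
// entries are not overwritten before they are copied.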
for (int i = prev_sz - 1; i >= 0; --i) {
size_t offs = i * repl_cnt;
std::fill(segment_.begin() + offs, segment_.begin() + offs + repl_cnt, segment_[i]);
}
global_depth_ = new_depth;
}
template <typename _Key, typename _Value, typename Policy>
void DashTable<_Key, _Value, Policy>::Split(uint32_t seg_id) {
SegmentType* source = segment_[seg_id];
size_t chunk_size = 1u << (global_depth_ - source->local_depth());
size_t start_idx = seg_id & (~(chunk_size - 1));
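// All chunk_size directory entries in [start_idx, start_idx + chunk_size) point at source;
// the upper half will be redirected to the new segment below.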
assert(segment_[start_idx] == source && segment_[start_idx + chunk_size - 1] == source);
std::pmr::polymorphic_allocator<SegmentType> alloc(segment_.get_allocator().resource());
SegmentType* target = alloc.allocate(1);
alloc.construct(target, source->local_depth() + 1);
auto cb = [this](const auto& k) { return policy_.HashFn(k); };
source->Split(std::move(cb), target); // increases the depth.
++unique_segments_;
for (size_t i = start_idx + chunk_size / 2; i < start_idx + chunk_size; ++i) {
segment_[i] = target;
}
}
template <typename _Key, typename _Value, typename Policy>
template <typename Cb>
uint64_t DashTable<_Key, _Value, Policy>::Traverse(uint64_t cursor, Cb&& cb) {
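// Cursor layout: the segment id occupies the top global_depth_ bits, the bucket id bits 8-15.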
unsigned bid = (cursor >> 8) & 0xFF;
uint32_t sid = SegmentId(cursor);
auto hash_fun = [this](const auto& k) { return policy_.HashFn(k); };
bool fetched = false;
while (!fetched && bid < kLogicalBucketNum) {
SegmentType& s = *segment_[sid];
auto dt_cb = [&](const SegmentIterator& it) { cb(iterator{this, sid, it.index, it.slot}); };
fetched = s.TraverseLogicalBucket(bid, hash_fun, std::move(dt_cb));
sid = NextSeg(sid);
if (sid >= segment_.size()) {
sid = 0;
++bid;
}
}
return bid >= kLogicalBucketNum ? 0 : (uint64_t(sid) << (64 - global_depth_)) | (bid << 8);
}
template <typename _Key, typename _Value, typename Policy>
template <typename Cb>
void DashTable<_Key, _Value, Policy>::TraverseBucket(const_iterator it, Cb&& cb) {
SegmentType& s = *segment_[it.seg_id_];
const auto& b = s.GetBucket(it.bucket_id_);
b.ForEachSlot([&](uint8_t slot, bool probe) {
cb(iterator{this, it.seg_id_, it.bucket_id_, slot});
});
}
} // namespace dfly

core/dash_internal.h Normal file

File diff suppressed because it is too large (1039 lines added).

core/dash_test.cc Normal file

@@ -0,0 +1,634 @@
// Copyright 2021, Roman Gershman. All rights reserved.
// See LICENSE for licensing terms.
//
#define ENABLE_DASH_STATS
#include "core/dash.h"
#include <absl/container/flat_hash_map.h>
#include <malloc.h>
#include <functional>
#include <set>
#include "base/gtest.h"
#include "base/hash.h"
#include "base/logging.h"
#include "base/zipf_gen.h"
extern "C" {
#include "redis/dict.h"
#include "redis/sds.h"
}
namespace dfly {
static uint64_t callbackHash(const void* key) {
return XXH64(&key, sizeof(key), 0);
}
static dictType IntDict = {callbackHash, NULL, NULL, NULL, NULL, NULL, NULL};
static uint64_t dictSdsHash(const void* key) {
return dictGenHashFunction((unsigned char*)key, sdslen((char*)key));
}
static int dictSdsKeyCompare(dict*, const void* key1, const void* key2) {
int l1, l2;
l1 = sdslen((sds)key1);
l2 = sdslen((sds)key2);
if (l1 != l2)
return 0;
return memcmp(key1, key2, l1) == 0;
}
static dictType SdsDict = {
dictSdsHash, /* hash function */
NULL, /* key dup */
NULL, /* val dup */
dictSdsKeyCompare, /* key compare */
NULL,
// dictSdsDestructor, /* key destructor */
NULL, /* val destructor */
NULL,
};
using namespace std;
struct Buf24 {
char buf[20];
uint32_t index;
Buf24(uint32_t i = 0) : index(i) {
}
};
struct UInt64Policy : public BasicDashPolicy {
static uint64_t HashFn(uint64_t v) {
return XXH3_64bits(&v, sizeof(v));
}
};
using Segment = detail::Segment<uint64_t, Buf24>;
using Dash64 = DashTable<uint64_t, uint64_t, UInt64Policy>;
constexpr auto kSegTax = Segment::kTaxSize;
constexpr size_t kMaxSize = Segment::kMaxSize;
constexpr size_t kSegSize = sizeof(Segment);
constexpr size_t foo = Segment::kBucketSz;
class DashTest : public testing::Test {
protected:
DashTest() : segment_(1) {
}
bool Find(Segment::Key_t key, Segment::Value_t* val) const {
uint64_t hash = dt_.DoHash(key);
std::equal_to<Segment::Key_t> eq;
auto it = segment_.FindIt(key, hash, eq);
if (!it.found())
return false;
*val = segment_.Value(it.index, it.slot);
return true;
}
bool Find(Segment::Key_t key) const {
uint64_t hash = dt_.DoHash(key);
std::equal_to<Segment::Key_t> eq;
auto it = segment_.FindIt(key, hash, eq);
return it.found();
}
set<Segment::Key_t> FillSegment(unsigned bid);
Segment segment_;
Dash64 dt_;
};
set<Segment::Key_t> DashTest::FillSegment(unsigned bid) {
std::set<Segment::Key_t> keys;
std::equal_to<Segment::Key_t> eq;
for (Segment::Key_t key = 0; key < 1000000u; ++key) {
uint64_t hash = dt_.DoHash(key);
unsigned bi = (hash >> 8) % Segment::kNumBuckets;
if (bi != bid)
continue;
uint8_t fp = hash & 0xFF;
if (fp > 2) // Limit fingerprints considerably to find interesting cases.
continue;
auto [it, success] = segment_.Insert(key, 0, hash, eq);
if (!success) {
LOG(INFO) << "Stopped at " << key;
break;
}
CHECK(it.found());
keys.insert(key);
}
return keys;
}
TEST_F(DashTest, Hash) {
for (uint64_t i = 0; i < 100; ++i) {
uint64_t hash = dt_.DoHash(i);
if (hash >> 63) {
VLOG(1) << "i " << i << ", Hash " << hash;
}
}
}
TEST_F(DashTest, SlotBitmap) {
detail::SlotBitmap<14> slot;
slot.SetSlot(1, true);
slot.SetSlot(5, false);
EXPECT_EQ(34, slot.GetBusy());
EXPECT_EQ(2, slot.GetProbe(true));
}
TEST_F(DashTest, Basic) {
Segment::Key_t key = 0;
Segment::Value_t val = 0;
uint64_t hash = dt_.DoHash(key);
std::equal_to<Segment::Key_t> eq;
EXPECT_TRUE(segment_.Insert(key, val, hash, eq).second);
auto [it, res] = segment_.Insert(key, val, hash, eq);
EXPECT_TRUE(!res && it.found());
EXPECT_TRUE(Find(key, &val));
EXPECT_EQ(0, val.index);
EXPECT_FALSE(Find(1, &val));
EXPECT_EQ(1, segment_.SlowSize());
unsigned has_called = 0;
auto cb = [&](const auto& it) { ++has_called; };
auto hfun = &UInt64Policy::HashFn;
auto cursor = segment_.TraverseLogicalBucket((hash >> 8) % Segment::kNumBuckets, hfun, cb);
ASSERT_EQ(1, has_called);
ASSERT_EQ(0, segment_.TraverseLogicalBucket(cursor, hfun, cb));
ASSERT_EQ(1, has_called);
}
TEST_F(DashTest, Segment) {
std::unique_ptr<Segment> seg(new Segment(1));
LOG(INFO) << "Segment size " << sizeof(Segment)
<< " malloc size: " << malloc_usable_size(seg.get());
set<Segment::Key_t> keys = FillSegment(0);
EXPECT_TRUE(segment_.GetBucket(0).IsFull() && segment_.GetBucket(1).IsFull());
for (size_t i = 2; i < Segment::kNumBuckets; ++i) {
EXPECT_EQ(0, segment_.GetBucket(i).Size());
}
EXPECT_EQ(4 * Segment::kNumSlots, keys.size());
EXPECT_EQ(4 * Segment::kNumSlots, segment_.SlowSize());
auto hfun = &UInt64Policy::HashFn;
unsigned has_called = 0;
auto cb = [&](const Segment::Iterator& it) {
++has_called;
ASSERT_EQ(1, keys.count(segment_.Key(it.index, it.slot)));
};
segment_.TraverseAll(cb);
ASSERT_EQ(keys.size(), has_called);
ASSERT_TRUE(segment_.GetBucket(Segment::kNumBuckets).IsFull());
std::array<uint64_t, Segment::kNumSlots * 2> arr;
uint64_t* next = arr.begin();
for (unsigned i = Segment::kNumBuckets; i < Segment::kNumBuckets + 2; ++i) {
const auto* k = &segment_.Key(i, 0);
next = std::copy(k, k + Segment::kNumSlots, next);
}
std::equal_to<Segment::Key_t> eq;
for (auto k : arr) {
auto hash = hfun(k);
auto it = segment_.FindIt(k, hash, eq);
ASSERT_TRUE(it.found());
segment_.Delete(it, hash);
}
EXPECT_EQ(2 * Segment::kNumSlots, segment_.SlowSize());
ASSERT_FALSE(Find(arr.front()));
}
TEST_F(DashTest, SegmentFull) {
std::equal_to<Segment::Key_t> eq;
for (Segment::Key_t key = 8000; key < 15000u; ++key) {
uint64_t hash = dt_.DoHash(key);
bool res = segment_.Insert(key, 0, hash, eq).second;
if (!res) {
LOG(INFO) << "Stopped at " << key;
break;
}
}
EXPECT_GT(segment_.SlowSize(), Segment::capacity() * 0.85);
LOG(INFO) << "Utilization " << double(segment_.SlowSize()) / Segment::capacity()
<< " num probing buckets: " << segment_.NumProbingBuckets();
LOG(INFO) << "NB: " << segment_.stats.neighbour_probes << " SP: " << segment_.stats.stash_probes
<< " SOP: " << segment_.stats.stash_overflow_probes;
segment_.stats.neighbour_probes = segment_.stats.stash_overflow_probes =
segment_.stats.stash_probes = 0;
for (Segment::Key_t key = 0; key < 10000u; ++key) {
Find(key);
}
LOG(INFO) << segment_.stats.neighbour_probes << " " << segment_.stats.stash_probes << " "
<< segment_.stats.stash_overflow_probes;
uint32_t busy = segment_.GetBucket(0).GetBusy();
uint32_t probe = segment_.GetBucket(0).GetProbe(true);
EXPECT_EQ((1 << 12) - 1, busy); // All 12 slots are busy.
EXPECT_EQ(539, probe); // Verified by running, since the test is deterministic.
unsigned keys[12] = {8045, 8085, 8217, 8330, 8337, 8381, 8432, 8506, 8587, 8605, 8612, 8725};
for (unsigned i = 0; i < 12; ++i) {
ASSERT_EQ(keys[i], segment_.Key(0, i));
}
}
TEST_F(DashTest, Split) {
set<Segment::Key_t> keys = FillSegment(0);
Segment::Value_t val;
Segment s2{2};
segment_.Split(&UInt64Policy::HashFn, &s2);
unsigned sum[2] = {0};
std::equal_to<Segment::Key_t> eq;
for (auto key : keys) {
auto it1 = segment_.FindIt(key, dt_.DoHash(key), eq);
auto it2 = s2.FindIt(key, dt_.DoHash(key), eq);
ASSERT_NE(it1.found(), it2.found()) << key;
sum[0] += it1.found();
sum[1] += it2.found();
}
ASSERT_EQ(segment_.SlowSize(), sum[0]);
EXPECT_EQ(s2.SlowSize(), sum[1]);
EXPECT_EQ(keys.size(), sum[0] + sum[1]);
EXPECT_EQ(4 * Segment::kNumSlots, keys.size());
}
TEST_F(DashTest, Insert2) {
uint64_t k = 1191;
ASSERT_EQ(2019837007031366716, UInt64Policy::HashFn(k));
Dash64 dt;
for (unsigned i = 0; i < 2000; ++i) {
dt.Insert(i, 0);
}
}
struct Item {
char buf[24];
};
constexpr size_t ItemAlign = alignof(Item);
struct MyBucket : public detail::BucketBase<16, 4> {
Item key[14];
};
constexpr size_t kMySz = sizeof(MyBucket);
constexpr size_t kBBSz = sizeof(detail::BucketBase<16, 4>);
TEST_F(DashTest, Custom) {
using ItemSegment = detail::Segment<Item, uint64_t>;
constexpr double kTax = ItemSegment::kTaxSize;
constexpr size_t kMaxSize = ItemSegment::kMaxSize;
constexpr size_t kSegSize = sizeof(ItemSegment);
constexpr size_t kBuckSz = ItemSegment::kBucketSz;
(void)kTax;
(void)kMaxSize;
(void)kSegSize;
(void)kBuckSz;
ItemSegment seg{2};
auto cb = [](auto v, auto u) { return v.buf[0] == u.buf[0] && v.buf[1] == u.buf[1]; };
auto it = seg.FindIt(Item{1, 1}, 42, cb);
ASSERT_FALSE(it.found());
}
TEST_F(DashTest, Reserve) {
unsigned bc = dt_.bucket_count();
for (unsigned i = 0; i <= bc * 2; ++i) {
dt_.Reserve(i);
ASSERT_GE((1 << dt_.depth()) * Dash64::kSegCapacity, i);
}
}
TEST_F(DashTest, Insert) {
constexpr size_t kNumItems = 10000;
double sum = 0;
for (size_t i = 0; i < kNumItems; ++i) {
dt_.Insert(i, i);
double u = (dt_.size() * 100.0) / (dt_.unique_segments() * Segment::capacity());
sum += u;
VLOG(1) << "Num items " << dt_.size() << ", load factor " << u << ", size per entry "
<< double(dt_.mem_usage()) / dt_.size();
}
EXPECT_EQ(kNumItems, dt_.size());
LOG(INFO) << "Average load factor is " << sum / kNumItems;
for (size_t i = 0; i < kNumItems; ++i) {
Dash64::const_iterator it = dt_.Find(i);
ASSERT_TRUE(it != dt_.end());
ASSERT_EQ(it.value(), i);
ASSERT_LE(dt_.load_factor(), 1) << i;
}
for (size_t i = kNumItems; i < kNumItems * 10; ++i) {
Dash64::const_iterator it = dt_.Find(i);
ASSERT_TRUE(it == dt_.end());
}
EXPECT_EQ(1, dt_.Erase(0));
EXPECT_EQ(0, dt_.Erase(0));
auto it = dt_.begin();
ASSERT_TRUE(it.is_valid());
auto some_val = *it;
dt_.Erase(it);
ASSERT_FALSE(dt_.Find(some_val).is_valid());
}
TEST_F(DashTest, Traverse) {
constexpr auto kNumItems = 50;
for (size_t i = 0; i < kNumItems; ++i) {
dt_.Insert(i, i);
}
uint64_t cursor = 0;
vector<unsigned> nums;
auto tr_cb = [&](const Dash64::iterator& it) {
nums.push_back(it.key());
VLOG(1) << it.bucket_id() << " " << it.slot_id() << " " << it.key();
};
do {
cursor = dt_.Traverse(cursor, tr_cb);
} while (cursor != 0);
sort(nums.begin(), nums.end());
nums.resize(unique(nums.begin(), nums.end()) - nums.begin());
ASSERT_EQ(kNumItems, nums.size());
EXPECT_EQ(0, nums[0]);
EXPECT_EQ(kNumItems - 1, nums.back());
}
TEST_F(DashTest, Bucket) {
constexpr auto kNumItems = 250;
for (size_t i = 0; i < kNumItems; ++i) {
dt_.Insert(i, 0);
}
std::vector<uint64_t> s;
auto it = dt_.begin();
auto bucket_it = Dash64::bucket_it(it);
dt_.TraverseBucket(it, [&](auto i) { s.push_back(i.key()); });
unsigned num_items = 0;
while (bucket_it.is_valid()) {
ASSERT_TRUE(find(s.begin(), s.end(), bucket_it.key()) != s.end());
++bucket_it;
++num_items;
}
EXPECT_EQ(s.size(), num_items);
}
TEST_F(DashTest, Eviction) {
Dash64::EvictionPolicy ev;
ev.max_capacity = 1500;
size_t i = 0;
for (; i < 5000; ++i) {
auto [it, res] = dt_.Insert(i, 0, ev);
if (!res)
break;
}
ASSERT_LT(i, 5000);
EXPECT_LT(dt_.size(), ev.max_capacity);
LOG(INFO) << "size is " << dt_.size();
unsigned bucket_cnt = dt_.bucket_count();
Dash64::EvictionCb cb = [this](const Dash64::EvictionCandidates& cand) -> unsigned {
auto it = cand.iter[0];
unsigned res = 0;
for (; it.is_valid(); ++it) {
LOG(INFO) << "Deleting " << it.key();
dt_.Erase(it);
++res;
}
return res;
};
ev.evict_cb = cb;
auto [it, res] = dt_.Insert(i, 0, ev);
EXPECT_TRUE(res);
EXPECT_EQ(bucket_cnt, dt_.bucket_count());
}
struct A {
int a = 0;
unsigned moved = 0;
A(int i = 0) : a(i) {
}
A(const A&) = delete;
A(A&& o) : a(o.a), moved(o.moved + 1) {
o.a = -1;
}
A& operator=(const A&) = delete;
A& operator=(A&& o) {
o.moved = o.moved + 1;
a = o.a;
o.a = -1;
return *this;
}
bool operator==(const A& o) const {
return o.a == a;
}
};
struct ADashPolicy : public BasicDashPolicy {
static uint64_t HashFn(const A& a) {
auto val = XXH3_64bits(&a.a, sizeof(a.a));
return val;
}
};
TEST_F(DashTest, Moveable) {
using DType = DashTable<A, A, ADashPolicy>;
DType table{1};
ASSERT_TRUE(table.Insert(A{1}, A{2}).second);
ASSERT_FALSE(table.Insert(A{1}, A{3}).second);
EXPECT_EQ(1, table.size());
table.Clear();
EXPECT_EQ(0, table.size());
}
struct SdsDashPolicy {
enum { kSlotNum = 12, kBucketNum = 64, kStashBucketNum = 2 };
static uint64_t HashFn(sds u) {
return XXH3_64bits(reinterpret_cast<const uint8_t*>(u), sdslen(u));
}
static uint64_t HashFn(std::string_view u) {
return XXH3_64bits(u.data(), u.size());
}
static void DestroyValue(uint64_t) {
}
static void DestroyKey(sds s) {
sdsfree(s);
}
static bool Equal(sds u1, sds u2) {
// dictSdsKeyCompare follows the redis dict convention: it returns nonzero when the keys match.
return dictSdsKeyCompare(nullptr, u1, u2) != 0;
}
static bool Equal(sds u1, std::string_view u2) {
return u2 == std::string_view{u1, sdslen(u1)};
}
};
TEST_F(DashTest, Sds) {
DashTable<sds, uint64_t, SdsDashPolicy> dt;
sds foo = sdscatlen(sdsempty(), "foo", 3);
dt.Insert(foo, 0);
// dt.Insert(std::string_view{"bar"}, 1);
}
// Benchmarks
static void BM_Insert(benchmark::State& state) {
unsigned count = state.range(0);
size_t next = 0;
while (state.KeepRunning()) {
Dash64 dt;
for (unsigned i = 0; i < count; ++i) {
dt.Insert(next++, 0);
}
}
}
BENCHMARK(BM_Insert)->Arg(1000)->Arg(10000)->Arg(100000);
struct NoDestroySdsPolicy : public SdsDashPolicy {
static void DestroyKey(sds s) {
}
};
static void BM_StringInsert(benchmark::State& state) {
unsigned count = state.range(0);
std::vector<sds> strs(count);
for (unsigned i = 0; i < count; ++i) {
strs[i] = sdscatprintf(sdsempty(), "key__%x", 100 + i);
}
while (state.KeepRunning()) {
DashTable<sds, uint64_t, NoDestroySdsPolicy> dt;
for (unsigned i = 0; i < count; ++i) {
dt.Insert(strs[i], 0);
}
}
for (sds s : strs) {
sdsfree(s);
}
}
BENCHMARK(BM_StringInsert)->Arg(1000)->Arg(10000)->Arg(100000);
static void BM_FindExisting(benchmark::State& state) {
unsigned count = state.range(0);
Dash64 dt;
for (unsigned i = 0; i < count; ++i) {
dt.Insert(i, 0);
}
size_t next = 0;
while (state.KeepRunning()) {
for (unsigned i = 0; i < 100; ++i) {
dt.Find(next++);
}
}
}
BENCHMARK(BM_FindExisting)->Arg(1000000)->Arg(2000000);
// dict memory usage is in [32*n + 8*n, 32*n + 16*n], or
// per entry usage is [40, 48].
static void BM_RedisDictFind(benchmark::State& state) {
unsigned count = state.range(0);
dict* d = dictCreate(&IntDict);
for (unsigned i = 0; i < count; ++i) {
size_t key = i;
dictAdd(d, (void*)key, nullptr);
}
size_t next = 0;
while (state.KeepRunning()) {
for (size_t i = 0; i < 100; ++i) {
size_t k = next++;
dictFind(d, (void*)k);
}
}
dictRelease(d);
}
BENCHMARK(BM_RedisDictFind)->Arg(1000000)->Arg(2000000);
// dict memory usage is in [32*n + 8*n, 32*n + 16*n], or
// per entry usage is [40, 48].
static void BM_RedisDictInsert(benchmark::State& state) {
unsigned count = state.range(0);
size_t next = 0;
while (state.KeepRunning()) {
dict* d = dictCreate(&IntDict);
for (unsigned i = 0; i < count; ++i) {
dictAdd(d, (void*)next, nullptr);
++next;
}
dictRelease(d);
}
}
BENCHMARK(BM_RedisDictInsert)->Arg(1000)->Arg(10000)->Arg(100000);
static void BM_RedisStringInsert(benchmark::State& state) {
unsigned count = state.range(0);
std::vector<sds> strs(count);
for (unsigned i = 0; i < count; ++i) {
strs[i] = sdscatprintf(sdsempty(), "key__%x", 100 + i);
}
while (state.KeepRunning()) {
dict* d = dictCreate(&SdsDict);
for (unsigned i = 0; i < count; ++i) {
dictAdd(d, strs[i], nullptr);
}
dictRelease(d);
}
for (sds s : strs) {
sdsfree(s);
}
}
BENCHMARK(BM_RedisStringInsert)->Arg(1000)->Arg(10000)->Arg(100000);
} // namespace dfly