
chore: fix fiber types in the codebase (#2574)

Reduce reliance on core/fibers

Signed-off-by: Roman Gershman <roman@dragonflydb.io>
Roman Gershman 2024-02-12 16:29:28 +02:00 committed by GitHub
parent 4000adf57f
commit b3b9080901
13 changed files with 45 additions and 54 deletions
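The change follows one pattern across the files below: drop the aliases that core/fibers.h injected into the dfly (and util) namespaces and spell out the util::fb2:: primitives at each use site, including util/fibers/synchronization.h where the synchronization types are needed. A minimal before/after sketch of the pattern in C++; the class names are illustrative and not part of the commit:

// Before: fiber primitives reach the code through aliases exported by core/fibers.h.
#include "core/fibers.h"

class ExampleBefore {             // illustrative class, not in the diff
  dfly::EventCount ec_;           // alias of util::fb2::EventCount
  mutable util::SharedMutex mu_;  // alias injected into namespace util
};

// After: name the fb2 types directly and include the fb2 header they live in.
#include "util/fibers/synchronization.h"

class ExampleAfter {
  util::fb2::EventCount ec_;
  mutable util::fb2::SharedMutex mu_;
};

Qualifying the types at each call site keeps the dependency on the fb2 fiber library explicit and lets the alias header shrink, which is where the net deletion count above comes from.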


@@ -12,20 +12,9 @@
namespace dfly {
using util::fb2::Barrier;
using util::fb2::BlockingCounter;
using util::fb2::CondVar;
using util::fb2::Done;
using util::fb2::EventCount;
using util::fb2::Fiber;
using util::fb2::Future;
using util::fb2::Launch;
using util::fb2::Mutex;
using util::fb2::Promise;
using util::fb2::SimpleChannel;
} // namespace dfly
namespace util {
using fb2::SharedMutex;
}


@@ -308,7 +308,7 @@ class Connection : public util::Connection {
// Block until memory usage is below limit, can be called from any thread
void EnsureBelowLimit();
- dfly::EventCount ec;
+ util::fb2::EventCount ec;
std::atomic_size_t subscriber_bytes = 0;
size_t subscriber_thread_limit = 0; // cached flag subscriber_thread_limit
@@ -377,7 +377,7 @@ class Connection : public util::Connection {
void DecreaseStatsOnClose();
std::deque<MessageHandle> dispatch_q_; // dispatch queue
- dfly::EventCount evc_; // dispatch queue waker
+ util::fb2::EventCount evc_; // dispatch queue waker
util::fb2::Fiber dispatch_fb_; // dispatch fiber (if started)
size_t pending_pipeline_cmd_cnt_ = 0; // how many queued async commands in dispatch_q


@@ -14,21 +14,23 @@
ABSL_DECLARE_FLAG(std::string, requirepass);
using namespace util;
namespace dfly::acl {
void UserRegistry::MaybeAddAndUpdate(std::string_view username, User::UpdateRequest req) {
- std::unique_lock<util::SharedMutex> lock(mu_);
+ std::unique_lock<fb2::SharedMutex> lock(mu_);
auto& user = registry_[username];
user.Update(std::move(req));
}
bool UserRegistry::RemoveUser(std::string_view username) {
- std::unique_lock<util::SharedMutex> lock(mu_);
+ std::unique_lock<fb2::SharedMutex> lock(mu_);
return registry_.erase(username);
}
UserRegistry::UserCredentials UserRegistry::GetCredentials(std::string_view username) const {
- std::shared_lock<util::SharedMutex> lock(mu_);
+ std::shared_lock<fb2::SharedMutex> lock(mu_);
auto it = registry_.find(username);
if (it == registry_.end()) {
return {};
@@ -37,7 +39,7 @@ UserRegistry::UserCredentials UserRegistry::GetCredentials(std::string_view user
}
bool UserRegistry::IsUserActive(std::string_view username) const {
- std::shared_lock<util::SharedMutex> lock(mu_);
+ std::shared_lock<fb2::SharedMutex> lock(mu_);
auto it = registry_.find(username);
if (it == registry_.end()) {
return false;
@@ -46,7 +48,7 @@ bool UserRegistry::IsUserActive(std::string_view username) const {
}
bool UserRegistry::AuthUser(std::string_view username, std::string_view password) const {
- std::shared_lock<util::SharedMutex> lock(mu_);
+ std::shared_lock<fb2::SharedMutex> lock(mu_);
const auto& user = registry_.find(username);
if (user == registry_.end()) {
return false;
@@ -56,23 +58,23 @@ bool UserRegistry::AuthUser(std::string_view username, std::string_view password
}
UserRegistry::RegistryViewWithLock UserRegistry::GetRegistryWithLock() const {
- std::shared_lock<util::SharedMutex> lock(mu_);
+ std::shared_lock<fb2::SharedMutex> lock(mu_);
return {std::move(lock), registry_};
}
UserRegistry::RegistryWithWriteLock UserRegistry::GetRegistryWithWriteLock() {
- std::unique_lock<util::SharedMutex> lock(mu_);
+ std::unique_lock<fb2::SharedMutex> lock(mu_);
return {std::move(lock), registry_};
}
- UserRegistry::UserWithWriteLock::UserWithWriteLock(std::unique_lock<util::SharedMutex> lk,
+ UserRegistry::UserWithWriteLock::UserWithWriteLock(std::unique_lock<fb2::SharedMutex> lk,
const User& user, bool exists)
: user(user), exists(exists), registry_lk_(std::move(lk)) {
}
UserRegistry::UserWithWriteLock UserRegistry::MaybeAddAndUpdateWithLock(std::string_view username,
User::UpdateRequest req) {
- std::unique_lock<util::SharedMutex> lock(mu_);
+ std::unique_lock<fb2::SharedMutex> lock(mu_);
const bool exists = registry_.contains(username);
auto& user = registry_[username];
user.Update(std::move(req));


@@ -5,7 +5,6 @@
#pragma once
#include <absl/container/flat_hash_map.h>
#include <absl/synchronization/mutex.h>
#include <algorithm>
#include <shared_mutex>
@@ -13,8 +12,8 @@
#include <utility>
#include <vector>
#include "core/fibers.h"
#include "server/acl/user.h"
#include "util/fibers/synchronization.h"
namespace dfly::acl {
@@ -70,12 +69,12 @@ class UserRegistry {
// Helper class for accessing a user with a ReadLock outside the scope of UserRegistry
class UserWithWriteLock {
public:
- UserWithWriteLock(std::unique_lock<util::SharedMutex> lk, const User& user, bool exists);
+ UserWithWriteLock(std::unique_lock<util::fb2::SharedMutex> lk, const User& user, bool exists);
const User& user;
const bool exists;
private:
- std::unique_lock<util::SharedMutex> registry_lk_;
+ std::unique_lock<util::fb2::SharedMutex> registry_lk_;
};
UserWithWriteLock MaybeAddAndUpdateWithLock(std::string_view username, User::UpdateRequest req);
@@ -84,18 +83,18 @@ class UserRegistry {
private:
RegistryType registry_;
- mutable util::SharedMutex mu_;
+ mutable util::fb2::SharedMutex mu_;
// Helper class for accessing the registry with a ReadLock outside the scope of UserRegistry
template <template <typename T> typename LockT, typename RegT> class RegistryWithLock {
public:
- RegistryWithLock(LockT<util::SharedMutex> lk, RegT reg)
+ RegistryWithLock(LockT<util::fb2::SharedMutex> lk, RegT reg)
: registry(reg), registry_lk_(std::move(lk)) {
}
RegT registry;
private:
- LockT<util::SharedMutex> registry_lk_;
+ LockT<util::fb2::SharedMutex> registry_lk_;
};
};


@@ -244,7 +244,7 @@ class EngineShard {
uint32_t defrag_task_ = 0;
Fiber fiber_periodic_;
- Done fiber_periodic_done_;
+ util::fb2::Done fiber_periodic_done_;
DefragTaskState defrag_state_;
std::unique_ptr<TieredStorage> tiered_storage_;
@@ -322,7 +322,7 @@ class EngineShardSet {
// The functions running inside the shard queue run atomically (sequentially)
// with respect to each other on the same shard.
template <typename U> void AwaitRunningOnShardQueue(U&& func) {
- BlockingCounter bc{unsigned(shard_queue_.size())};
+ util::fb2::BlockingCounter bc{unsigned(shard_queue_.size())};
for (size_t i = 0; i < shard_queue_.size(); ++i) {
Add(i, [&func, bc]() mutable {
func(EngineShard::tlocal());
@@ -347,7 +347,7 @@ class EngineShardSet {
template <typename U, typename P>
void EngineShardSet::RunBriefInParallel(U&& func, P&& pred) const {
- BlockingCounter bc{0};
+ util::fb2::BlockingCounter bc{0};
for (uint32_t i = 0; i < size(); ++i) {
if (!pred(i))
@@ -364,7 +364,7 @@ void EngineShardSet::RunBriefInParallel(U&& func, P&& pred) const {
}
template <typename U, typename P> void EngineShardSet::RunBlockingInParallel(U&& func, P&& pred) {
- BlockingCounter bc{0};
+ util::fb2::BlockingCounter bc{0};
static_assert(std::is_invocable_v<U, EngineShard*>,
"Argument must be invocable EngineShard* as argument.");
static_assert(std::is_void_v<std::invoke_result_t<U, EngineShard*>>,


@@ -64,9 +64,9 @@ class BufferedStreamerBase : public io::Sink {
bool IsStopped();
protected:
- bool producer_done_ = false;  // whether producer is done
- unsigned buffered_ = 0;       // how many entries are buffered
- EventCount waker_;            // two sided waker
+ bool producer_done_ = false;   // whether producer is done
+ unsigned buffered_ = 0;        // how many entries are buffered
+ util::fb2::EventCount waker_;  // two sided waker
const Cancellation* cll_; // global cancellation


@@ -68,7 +68,7 @@ class JournalSlice {
std::optional<base::RingBuffer<JournalItem>> ring_buffer_;
base::IoBuf ring_serialize_buf_;
- mutable util::SharedMutex cb_mu_;
+ mutable util::fb2::SharedMutex cb_mu_;
std::vector<std::pair<uint32_t, ChangeCallback>> change_cb_arr_ ABSL_GUARDED_BY(cb_mu_);
LSN lsn_ = 1;


@@ -16,9 +16,9 @@ struct JournalReader;
class MultiShardExecution {
public:
struct TxExecutionSync {
- Barrier barrier;
+ util::fb2::Barrier barrier;
std::atomic_uint32_t counter;
- BlockingCounter block;
+ util::fb2::BlockingCounter block;
explicit TxExecutionSync(uint32_t counter)
: barrier(counter), counter(counter), block(counter) {


@@ -148,7 +148,7 @@ class Replica : ProtocolClient {
// In redis replication mode.
Fiber sync_fb_;
Fiber acks_fb_;
- EventCount waker_;
+ util::fb2::EventCount waker_;
std::vector<std::unique_ptr<DflyShardReplica>> shard_flows_;
// A vector of the last executed LSNs when a replication is interrupted.
@@ -182,13 +182,14 @@ class DflyShardReplica : public ProtocolClient {
// Start replica initialized as dfly flow.
// Sets is_full_sync when successful.
- io::Result<bool> StartSyncFlow(BlockingCounter block, Context* cntx, std::optional<LSN>);
+ io::Result<bool> StartSyncFlow(util::fb2::BlockingCounter block, Context* cntx,
+                                std::optional<LSN>);
// Transition into stable state mode as dfly flow.
std::error_code StartStableSyncFlow(Context* cntx);
// Single flow full sync fiber spawned by StartFullSyncFlow.
- void FullSyncDflyFb(std::string eof_token, BlockingCounter block, Context* cntx);
+ void FullSyncDflyFb(std::string eof_token, util::fb2::BlockingCounter block, Context* cntx);
// Single flow stable state sync fiber spawned by StartStableSyncFlow.
void StableSyncDflyReadFb(Context* cntx);
@@ -213,7 +214,7 @@ class DflyShardReplica : public ProtocolClient {
std::queue<std::pair<TransactionData, bool>> trans_data_queue_;
static constexpr size_t kYieldAfterItemsInQueue = 50;
- EventCount waker_; // waker for trans_data_queue_
+ util::fb2::EventCount waker_; // waker for trans_data_queue_
bool use_multi_shard_exe_sync_;
std::unique_ptr<JournalExecutor> executor_;


@@ -807,12 +807,12 @@ struct AggregateLoadResult {
// Load starts as many fibers as there are files to load each one separately.
// It starts one more fiber that waits for all load fibers to finish and returns the first
error (if any occurred) with a future.
- Future<GenericError> ServerFamily::Load(const std::string& load_path) {
+ fb2::Future<GenericError> ServerFamily::Load(const std::string& load_path) {
auto paths_result = snapshot_storage_->LoadPaths(load_path);
if (!paths_result) {
LOG(ERROR) << "Failed to load snapshot: " << paths_result.error().Format();
- Promise<GenericError> ec_promise;
+ fb2::Promise<GenericError> ec_promise;
ec_promise.set_value(paths_result.error());
return ec_promise.get_future();
}
@@ -856,8 +856,8 @@ Future<GenericError> ServerFamily::Load(const std::string& load_path) {
load_fibers.push_back(proactor->LaunchFiber(std::move(load_fiber)));
}
- Promise<GenericError> ec_promise;
- Future<GenericError> ec_future = ec_promise.get_future();
+ fb2::Promise<GenericError> ec_promise;
+ fb2::Future<GenericError> ec_future = ec_promise.get_future();
// Run fiber that empties the channel and sets ec_promise.
auto load_join_fiber = [this, aggregated_result, load_fibers = std::move(load_fibers),
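The Load hunks above also show the promise/future pattern these fb2 types provide: the function returns a fb2::Future<GenericError> immediately, and a join fiber publishes the first error through the matching promise once all per-file load fibers finish. A hedged sketch of that pattern, restricted to the get_future/set_value calls visible in the diff; the function name and the inline invocation are illustrative:

// Assumes the dragonfly/helio headers defining util::fb2::Promise/Future and GenericError.
util::fb2::Future<GenericError> StartLoadSketch() {
  util::fb2::Promise<GenericError> ec_promise;
  util::fb2::Future<GenericError> ec_future = ec_promise.get_future();

  // In the real code the promise moves into load_join_fiber, which runs on a proactor
  // and fulfils it after joining the load fibers; here the lambda simply runs inline.
  auto join_work = [promise = std::move(ec_promise)]() mutable {
    GenericError first_error;  // default value: no error occurred
    promise.set_value(std::move(first_error));
  };
  join_work();

  return ec_future;  // the caller blocks on this only when it needs the outcome
}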


@@ -171,7 +171,7 @@ class ServerFamily {
// Load snapshot from file (.rdb file or summary.dfs file) and return
// future with error_code.
- Future<GenericError> Load(const std::string& file_name);
+ util::fb2::Future<GenericError> Load(const std::string& file_name);
bool IsSaving() const {
return is_saving_.load(std::memory_order_relaxed);
@@ -261,7 +261,7 @@ class ServerFamily {
void SendInvalidationMessages() const;
Fiber snapshot_schedule_fb_;
- Future<GenericError> load_result_;
+ util::fb2::Future<GenericError> load_result_;
uint32_t stats_caching_task_ = 0;
Service& service_;
@@ -294,11 +294,11 @@ class ServerFamily {
// be --dbfilename.
bool save_on_shutdown_{true};
- Done schedule_done_;
+ util::fb2::Done schedule_done_;
std::unique_ptr<util::fb2::FiberQueueThreadPool> fq_threadpool_;
std::shared_ptr<detail::SnapshotStorage> snapshot_storage_;
- mutable Mutex peak_stats_mu_;
+ mutable util::fb2::Mutex peak_stats_mu_;
mutable PeakStats peak_stats_;
};


@@ -285,7 +285,7 @@ class ServerState { // public struct - to allow initialization.
// should subscribe to `client_pause_ec_` through `AwaitPauseState` to be
// notified when the break is over.
int client_pauses_[2] = {};
- EventCount client_pause_ec_;
+ util::fb2::EventCount client_pause_ec_;
using Counter = util::SlidingCounter<7>;
Counter qps_;


@@ -427,7 +427,7 @@ class Transaction {
uint32_t DEBUG_Count() const; // Get current counter value
private:
std::atomic_uint32_t count_{0};
- EventCount ec_{};
+ util::fb2::EventCount ec_{};
};
// "Single claim - single modification" barrier. Multiple threads might try to claim it, only one
@@ -446,7 +446,7 @@ class Transaction {
private:
std::atomic_bool claimed_{false};
std::atomic_bool closed_{false};
- EventCount ec_{};
+ util::fb2::EventCount ec_{};
};
private: