
chore: add macos daily build (#1795)

It compiles most of the code, though some linking problems still exist.

Signed-off-by: Roman Gershman <roman@dragonflydb.io>
Roman Gershman 2023-09-06 09:35:11 +03:00 committed by GitHub
parent b8a9d1f093
commit 36be222091
22 changed files with 187 additions and 41 deletions


@@ -36,17 +36,67 @@ jobs:
- uses: actions/checkout@v3
with:
submodules: true
- name: Run sccache-cache
uses: mozilla-actions/sccache-action@v0.0.3
- name: Configure Cache Env
uses: actions/github-script@v6
with:
script: |
core.exportVariable('ACTIONS_CACHE_URL', process.env.ACTIONS_CACHE_URL || '');
core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env.ACTIONS_RUNTIME_TOKEN || '')
- name: Install dependencies
run: |
cmake --version
mkdir -p $GITHUB_WORKSPACE/build
- name: Install packages
if: matrix.container == 'fedora:30'
run: |
echo Passed
- name: Configure & Build
run: |
cd $GITHUB_WORKSPACE/build
-cmake .. -DCMAKE_BUILD_TYPE=Debug -GNinja -DCMAKE_CXX_COMPILER_LAUNCHER=ccache ${{ matrix.flags }}
+cmake .. -DCMAKE_BUILD_TYPE=Debug -GNinja -DCMAKE_CXX_COMPILER_LAUNCHER=sccache -DCMAKE_C_COMPILER_LAUNCHER=sccache \
+${{ matrix.flags }}
ninja src/all
- name: Test
run: |
cd $GITHUB_WORKSPACE/build
ctest -V -L DFLY
build-macos:
runs-on: macos-latest
timeout-minutes: 45
steps:
- uses: actions/checkout@v3
with:
submodules: true
- name: Install dependencies
run: |
brew update && brew install ninja boost openssl automake gcc zstd icu4c bison c-ares \
autoconf libtool automake
brew info icu4c
mkdir -p $GITHUB_WORKSPACE/build
- name: Configure & Build
run: |
cd $GITHUB_WORKSPACE/build
export PATH=/usr/local/opt/bison/bin:$PATH
which gcc
which gcc-13
alias g++=g++-13
alias gcc=gcc-13
bison --version
alias g++
echo "*************************** START BUILDING **************************************"
CC=gcc-13 CXX=g++-13 cmake .. -DCMAKE_BUILD_TYPE=Debug -GNinja -DWITH_UNWIND=OFF \
-DCMAKE_PREFIX_PATH=/usr/local/opt/icu4c
ninja ok_backend dfly_core_test dconv_project
ninja dragonfly


@@ -22,7 +22,7 @@ sudo apt install ninja-build libunwind-dev libboost-fiber-dev libssl-dev \
On Fedora:
```bash
-sudo yum install automake boost-devel g++ git cmake libtool ninja-build libzstd-devel \
+sudo dnf install -y automake boost-devel g++ git cmake libtool ninja-build libzstd-devel \
openssl-devel libunwind-devel autoconf-archive patch bison libxml2-devel libicu-devel
```


@@ -15,13 +15,15 @@ diff --git a/makefile b/makefile
 index d46e650c..c27e5677 100644
 --- a/makefile
 +++ b/makefile
-@@ -66,13 +66,25 @@ LOCAL = $(TESTS) $(CWARNS)
+@@ -66,13 +66,26 @@ LOCAL = $(TESTS) $(CWARNS)
  # enable Linux goodies
 -MYCFLAGS= $(LOCAL) -std=c99 -DLUA_USE_LINUX -DLUA_USE_READLINE
 +MYCFLAGS= $(LOCAL) -std=c99 -g -O2 -DLUA_USE_LINUX
- MYLDFLAGS= $(LOCAL) -Wl,-E
+-MYLDFLAGS= $(LOCAL) -Wl,-E
++# Commenting out dynamic linking flags because we link statically
++# and this does not work on MacOS: MYLDFLAGS= $(LOCAL) -Wl,-E
 -MYLIBS= -ldl -lreadline
 +MYLIBS= -ldl


@@ -45,13 +45,19 @@ function(gen_bison name)
set_source_files_properties(${name}.cc ${name}_base.h PROPERTIES GENERATED TRUE)
endfunction()
if (APPLE)
set(SED_REPL sed "-i" '')
else()
set(SED_REPL sed "-i")
endif()
add_third_party(
dconv
URL https://github.com/google/double-conversion/archive/refs/tags/v3.3.0.tar.gz
-PATCH_COMMAND sed -i "/static const std::ctype/d"
+PATCH_COMMAND ${SED_REPL} "/static const std::ctype/d"
<SOURCE_DIR>/double-conversion/string-to-double.cc
-COMMAND sed -i "/std::use_facet</d" <SOURCE_DIR>/double-conversion/string-to-double.cc
-COMMAND sed -i "s/cType.tolower/std::tolower/g" <SOURCE_DIR>/double-conversion/string-to-double.cc
+COMMAND ${SED_REPL} "/std::use_facet</d" <SOURCE_DIR>/double-conversion/string-to-double.cc
+COMMAND ${SED_REPL} "s/cType.tolower/std::tolower/g" <SOURCE_DIR>/double-conversion/string-to-double.cc
LIB libdouble-conversion.a
)


@@ -1,11 +1,17 @@
# We have some linking problems with search on Apple
if (NOT APPLE)
add_subdirectory(search)
set(SEARCH_LIB query_parser)
endif()
add_library(dfly_core compact_object.cc dragonfly_core.cc extent_tree.cc
external_alloc.cc interpreter.cc json_object.cc mi_memory_resource.cc sds_utils.cc
segment_allocator.cc simple_lru_counter.cc score_map.cc small_string.cc sorted_map.cc
tx_queue.cc dense_set.cc
string_set.cc string_map.cc detail/bitpacking.cc)
-cxx_link(dfly_core base query_parser absl::flat_hash_map absl::str_format redis_lib TRDP::lua lua_modules
-fibers2 TRDP::jsoncons OpenSSL::Crypto)
+cxx_link(dfly_core base absl::flat_hash_map absl::str_format redis_lib TRDP::lua lua_modules
+fibers2 ${SEARCH_LIB} TRDP::jsoncons OpenSSL::Crypto)
add_executable(dash_bench dash_bench.cc)
cxx_link(dash_bench dfly_core)
@@ -23,5 +29,3 @@ cxx_test(string_map_test dfly_core LABELS DFLY)
cxx_test(sorted_map_test dfly_core LABELS DFLY)
cxx_test(bptree_set_test dfly_core LABELS DFLY)
cxx_test(score_map_test dfly_core LABELS DFLY)
-add_subdirectory(search)


@@ -228,8 +228,11 @@ TEST_F(DashTest, Basic) {
TEST_F(DashTest, Segment) {
std::unique_ptr<Segment> seg(new Segment(1));
#ifndef __APPLE__
LOG(INFO) << "Segment size " << sizeof(Segment)
<< " malloc size: " << malloc_usable_size(seg.get());
#endif
set<Segment::Key_t> keys = FillSegment(0);
@@ -817,7 +820,7 @@ TEST_F(DashTest, SplitBug) {
string_view line;
uint64_t val;
while (lr.Next(&line)) {
-CHECK(absl::SimpleHexAtoi(line, &val));
+CHECK(absl::SimpleHexAtoi(line, &val)) << line;
table.Insert(val, 0);
}
EXPECT_EQ(746, table.size());


@@ -74,7 +74,8 @@ absl::flat_hash_set<std::string> ICUTokenizeWords(std::string_view text) {
// Convert string to lowercase with ICU library
std::string ICUToLowercase(string_view input) {
-icu::UnicodeString uStr = icu::UnicodeString::fromUTF8(input);
+icu::UnicodeString uStr =
+    icu::UnicodeString::fromUTF8(icu::StringPiece(input.data(), input.size()));
uStr.toLower();
std::string result;
uStr.toUTF8String(result);

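A note on the ICU hunk above: `icu::UnicodeString::fromUTF8()` takes an `icu::StringPiece`, and only newer ICU releases can build a `StringPiece` implicitly from `std::string_view`, so the conversion is spelled out, presumably for the ICU build used on the macOS runner. A minimal standalone sketch of the same pattern (the function name `ToLowerUtf8` is illustrative, not from the codebase):

```cpp
#include <string>
#include <string_view>

#include <unicode/stringpiece.h>
#include <unicode/unistr.h>

// Lowercases UTF-8 text via ICU. The explicit StringPiece(data, size)
// construction compiles even on ICU versions whose StringPiece has no
// std::string_view constructor.
std::string ToLowerUtf8(std::string_view input) {
  icu::UnicodeString ustr = icu::UnicodeString::fromUTF8(
      icu::StringPiece(input.data(), static_cast<int32_t>(input.size())));
  ustr.toLower();
  std::string out;
  ustr.toUTF8String(out);
  return out;
}
```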

@@ -1,5 +1,5 @@
%skeleton "lalr1.cc" // -*- C++ -*-
-%require "3.5.1" // That's what's present on ubuntu 20.04.
+%require "3.5" // fedora 32 has this one.
%defines // %header starts from 3.8.1


@@ -396,7 +396,12 @@ string Connection::GetClientInfo(unsigned thread_id) const {
int cpu = 0;
socklen_t len = sizeof(cpu);
getsockopt(socket_->native_handle(), SOL_SOCKET, SO_INCOMING_CPU, &cpu, &len);
#ifdef __APPLE__
int my_cpu_id = -1; // __APPLE__ does not have sched_getcpu()
#else
int my_cpu_id = sched_getcpu();
#endif
static constexpr string_view PHASE_NAMES[] = {"readsock", "process"};
static_assert(PHASE_NAMES[PROCESS] == "process");

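The `sched_getcpu()` guard above exists because that call is Linux-specific; macOS has no direct equivalent, so the code reports `-1`. A self-contained sketch of the same guard (`CurrentCpu` is a hypothetical name):

```cpp
#include <cstdio>

#ifndef __APPLE__
#include <sched.h>  // sched_getcpu() lives here on Linux
#endif

// Returns the CPU the calling thread is currently running on,
// or -1 on platforms (such as macOS) without sched_getcpu().
int CurrentCpu() {
#ifdef __APPLE__
  return -1;
#else
  return sched_getcpu();
#endif
}

int main() {
  std::printf("running on cpu %d\n", CurrentCpu());
  return 0;
}
```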

@@ -107,8 +107,13 @@ bool ConfigureKeepAlive(int fd) {
return false;
val = absl::GetFlag(FLAGS_tcp_keepalive);
#ifdef __APPLE__
if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE, &val, sizeof(val)) < 0)
return false;
#else
if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &val, sizeof(val)) < 0)
return false;
#endif
/* Send next probes after the specified interval. Note that we set the
* delay as interval / 3, as we send three probes before detecting
@@ -160,6 +165,7 @@ error_code Listener::ConfigureServerSocket(int fd) {
bool success = ConfigureKeepAlive(fd);
if (!success) {
#ifndef __APPLE__
int myerr = errno;
int socket_type;
@@ -170,6 +176,7 @@ error_code Listener::ConfigureServerSocket(int fd) {
socket_type != AF_UNIX) {
LOG(WARNING) << "Could not configure keep alive " << SafeErrorMessage(myerr);
}
#endif
}
return error_code{};

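The platform split in the keepalive hunk above comes down to naming: the option that sets the keepalive idle time is `TCP_KEEPIDLE` on Linux but `TCP_KEEPALIVE` on macOS. A minimal sketch of a portable setter (`SetKeepAliveIdle` is a hypothetical name, not this code's API):

```cpp
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

// Sets how many seconds a connection may stay idle before TCP keepalive
// probes are sent. Returns true on success.
bool SetKeepAliveIdle(int fd, int seconds) {
#ifdef __APPLE__
  // macOS names the idle-time option TCP_KEEPALIVE.
  return setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE, &seconds, sizeof(seconds)) == 0;
#else
  // Linux uses TCP_KEEPIDLE.
  return setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &seconds, sizeof(seconds)) == 0;
#endif
}
```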

@@ -62,7 +62,11 @@ void _serverPanic(const char *file, int line, const char *msg, ...) {
serverLog(LL_WARNING, "!!! Software Failure. Press left mouse button to continue");
serverLog(LL_WARNING, "Guru Meditation: %s #%s:%d", fmtmsg,file,line);
#ifndef NDEBUG
#ifdef __APPLE__
__assert_rtn(msg, file, line, "");
#else
__assert_fail(msg, file, line, "");
#endif
#endif
}


@@ -51,6 +51,7 @@
#include <malloc/malloc.h>
#define HAVE_MALLOC_SIZE 1
#define zmalloc_size(p) malloc_size(p)
#define ZMALLOC_LIB "macos"
#endif
/* On native libc implementations, we should still do our best to provide a


@@ -21,10 +21,16 @@ add_library(dfly_transaction db_slice.cc malloc_stats.cc engine_shard_set.cc blo
)
cxx_link(dfly_transaction dfly_core strings_lib)
if (NOT APPLE)
SET(SEARCH_FILES search/search_family.cc search/doc_index.cc search/doc_accessors.cc)
cxx_test(search/search_family_test dfly_test_lib LABELS DFLY)
endif()
add_library(dragonfly_lib channel_store.cc command_registry.cc
config_registry.cc conn_context.cc debugcmd.cc dflycmd.cc
generic_family.cc hset_family.cc json_family.cc
-search/search_family.cc search/doc_index.cc search/doc_accessors.cc
+${SEARCH_FILES}
list_family.cc main_service.cc memory_cmd.cc rdb_load.cc rdb_save.cc replica.cc
protocol_client.cc
snapshot.cc script_mgr.cc server_family.cc malloc_stats.cc
@@ -37,7 +43,7 @@ add_library(dragonfly_lib channel_store.cc command_registry.cc
acl/validator.cc)
-find_library(ZSTD_LIB NAMES libzstd.a libzstdstatic.a zstd NAMES_PER_DIR)
+find_library(ZSTD_LIB NAMES libzstd.a libzstdstatic.a zstd NAMES_PER_DIR REQUIRED)
cxx_link(dragonfly_lib dfly_transaction dfly_facade redis_lib aws_lib strings_lib html_lib
http_client_lib absl::random_random TRDP::jsoncons ${ZSTD_LIB} TRDP::lz4 TRDP::croncpp)
@@ -69,7 +75,6 @@ cxx_test(journal/journal_test dfly_test_lib LABELS DFLY)
cxx_test(tiered_storage_test dfly_test_lib LABELS DFLY)
cxx_test(top_keys_test dfly_test_lib LABELS DFLY)
cxx_test(hll_family_test dfly_test_lib LABELS DFLY)
-cxx_test(search/search_family_test dfly_test_lib LABELS DFLY)
cxx_test(cluster/cluster_config_test dfly_test_lib LABELS DFLY)
cxx_test(cluster/cluster_family_test dfly_test_lib LABELS DFLY)
cxx_test(acl/user_registry_test dfly_test_lib LABELS DFLY)


@@ -39,7 +39,11 @@ namespace fs = std::filesystem;
namespace {
const size_t kBucketConnectMs = 2000;
#ifdef __linux__
const int kRdbWriteFlags = O_CREAT | O_WRONLY | O_TRUNC | O_CLOEXEC | O_DIRECT;
#endif
constexpr string_view kS3Prefix = "s3://"sv;
bool IsCloudPath(string_view path) {
@@ -252,6 +256,7 @@ GenericError RdbSnapshot::Start(SaveMode save_mode, const std::string& path,
return GenericError(res.error(), "Couldn't open file for writing");
io_sink_.reset(*res);
} else {
#ifdef __linux__
auto res = OpenLinux(path, kRdbWriteFlags, 0666);
if (!res) {
return GenericError(
@@ -261,6 +266,9 @@ GenericError RdbSnapshot::Start(SaveMode save_mode, const std::string& path,
is_linux_file_ = true;
io_sink_.reset(new LinuxWriteWrapper(res->release()));
is_direct = kRdbWriteFlags & O_DIRECT;
#else
LOG(FATAL) << "Linux I/O is not supported on this platform";
#endif
}
}
@@ -507,6 +515,7 @@ RdbSaver::GlobalData SaveStagesController::GetGlobalData() const {
script_bodies.push_back(move(data.body));
}
#ifndef __APPLE__
{
shard_set->Await(0, [&] {
auto* indices = EngineShard::tlocal()->search_indices();
@@ -517,6 +526,7 @@ RdbSaver::GlobalData SaveStagesController::GetGlobalData() const {
}
});
}
#endif
return RdbSaver::GlobalData{move(script_bodies), move(search_indices)};
}


@@ -754,14 +754,14 @@ Usage: dragonfly [FLAGS]
}
}
-auto memory = ReadMemInfo().value();
+io::MemInfoData mem_info = ReadMemInfo().value_or(io::MemInfoData{});
size_t max_available_threads = 0u;
#ifdef __linux__
-UpdateResourceLimitsIfInsideContainer(&memory, &max_available_threads);
+UpdateResourceLimitsIfInsideContainer(&mem_info, &max_available_threads);
#endif
-if (memory.swap_total != 0)
+if (mem_info.swap_total != 0)
LOG(WARNING) << "SWAP is enabled. Consider disabling it when running Dragonfly.";
dfly::max_memory_limit = dfly::GetMaxMemoryFlag();
@@ -769,8 +769,13 @@ Usage: dragonfly [FLAGS]
if (dfly::max_memory_limit == 0) {
LOG(INFO) << "maxmemory has not been specified. Deciding myself....";
-size_t available = memory.mem_avail;
+size_t available = mem_info.mem_avail;
size_t maxmemory = size_t(0.8 * available);
if (maxmemory == 0) {
LOG(ERROR) << "Could not deduce how much memory available. "
<< "Use --maxmemory=... to specify explicitly";
return 1;
}
LOG(INFO) << "Found " << HumanReadableNumBytes(available)
<< " available memory. Setting maxmemory to " << HumanReadableNumBytes(maxmemory);
@@ -778,9 +783,9 @@ Usage: dragonfly [FLAGS]
dfly::max_memory_limit = maxmemory;
} else {
string hr_limit = HumanReadableNumBytes(dfly::max_memory_limit);
-if (dfly::max_memory_limit > memory.mem_avail)
+if (dfly::max_memory_limit > mem_info.mem_avail)
LOG(WARNING) << "Got memory limit " << hr_limit << ", however only "
-<< HumanReadableNumBytes(memory.mem_avail) << " was found.";
+<< HumanReadableNumBytes(mem_info.mem_avail) << " was found.";
LOG(INFO) << "Max memory limit is: " << hr_limit;
}


@@ -108,7 +108,7 @@ class InMemSource : public ::io::Source {
::io::Result<size_t> InMemSource::ReadSome(const iovec* v, uint32_t len) {
ssize_t read_total = 0;
while (size_t(offs_) < buf_.size() && len > 0) {
-size_t read_sz = min(buf_.size() - offs_, v->iov_len);
+size_t read_sz = min<size_t>(buf_.size() - offs_, v->iov_len);
memcpy(v->iov_base, buf_.data() + offs_, read_sz);
read_total += read_sz;
offs_ += read_sz;
@@ -723,7 +723,7 @@ void GenericFamily::Expire(CmdArgList args, ConnectionContext* cntx) {
return (*cntx)->SendError(InvalidExpireTime(cntx->cid->name()));
}
-int_arg = std::max(int_arg, -1L);
+int_arg = std::max<int64_t>(int_arg, -1);
DbSlice::ExpireParams params{.value = int_arg};
auto cb = [&](Transaction* t, EngineShard* shard) {
@@ -743,7 +743,7 @@ void GenericFamily::ExpireAt(CmdArgList args, ConnectionContext* cntx) {
return (*cntx)->SendError(kInvalidIntErr);
}
-int_arg = std::max(int_arg, 0L);
+int_arg = std::max<int64_t>(int_arg, 0L);
DbSlice::ExpireParams params{.value = int_arg, .absolute = true};
auto cb = [&](Transaction* t, EngineShard* shard) {
@@ -787,7 +787,7 @@ void GenericFamily::PexpireAt(CmdArgList args, ConnectionContext* cntx) {
if (!absl::SimpleAtoi(msec, &int_arg)) {
return (*cntx)->SendError(kInvalidIntErr);
}
-int_arg = std::max(int_arg, 0L);
+int_arg = std::max<int64_t>(int_arg, 0L);
DbSlice::ExpireParams params{.value = int_arg, .absolute = true, .unit = TimeUnit::MSEC};
auto cb = [&](Transaction* t, EngineShard* shard) {
@@ -810,7 +810,7 @@ void GenericFamily::Pexpire(CmdArgList args, ConnectionContext* cntx) {
if (!absl::SimpleAtoi(msec, &int_arg)) {
return (*cntx)->SendError(kInvalidIntErr);
}
-int_arg = std::max(int_arg, 0L);
+int_arg = std::max<int64_t>(int_arg, 0L);
DbSlice::ExpireParams params{.value = int_arg, .unit = TimeUnit::MSEC};
auto cb = [&](Transaction* t, EngineShard* shard) {

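The recurring `std::max(...)` to `std::max<int64_t>(...)` edits in this file fix a genuine portability pitfall: on LP64 Linux `int64_t` is `long`, so `std::max(int_arg, -1L)` sees two identical types; on macOS `int64_t` is `long long`, template argument deduction sees two different types, and the call fails to compile. Supplying the template argument sidesteps deduction entirely. A tiny illustration (`ClampExpire` is a made-up name):

```cpp
#include <algorithm>
#include <cstdint>

int64_t ClampExpire(int64_t int_arg) {
  // std::max(int_arg, -1L) compiles on Linux, where int64_t happens to be
  // long, but fails on macOS, where int64_t is long long and deduction
  // cannot pick a single type for the two arguments.
  return std::max<int64_t>(int_arg, -1);  // explicit type: always well-formed
}
```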

@@ -2160,7 +2160,11 @@ void Service::RegisterCommands() {
JsonFamily::Register(&registry_);
BitOpsFamily::Register(&registry_);
HllFamily::Register(&registry_);
#ifndef __APPLE__
SearchFamily::Register(&registry_);
#endif
acl_family_.Register(&registry_);
server_family_.Register(&registry_);


@@ -63,8 +63,8 @@ std::string MallocStats(bool backing, unsigned tid) {
uint64_t delta = (absl::GetCurrentTimeNanos() - start) / 1000;
absl::StrAppend(&str, "--- End mimalloc statistics, took ", delta, "us ---\n");
absl::StrAppend(&str, "total reserved: ", reserved, ", comitted: ", committed, ", used: ", used,
-"fragmentation waste: ", (100.0 * (committed - used)) / std::max(1UL, committed),
-"%\n");
+"fragmentation waste: ",
+(100.0 * (committed - used)) / std::max<size_t>(1UL, committed), "%\n");
return str;
}


@@ -1495,7 +1495,7 @@ auto RdbLoaderBase::ReadGeneric(int rdbtype) -> io::Result<OpaqueObj> {
}
auto RdbLoaderBase::ReadHMap() -> io::Result<OpaqueObj> {
-uint64_t len;
+size_t len;
SET_OR_UNEXPECT(LoadLen(nullptr), len);
if (len == 0)
@@ -1506,7 +1506,7 @@ auto RdbLoaderBase::ReadHMap() -> io::Result<OpaqueObj> {
len *= 2;
load_trace->arr.resize((len + kMaxBlobLen - 1) / kMaxBlobLen);
for (size_t i = 0; i < load_trace->arr.size(); ++i) {
-size_t n = std::min(len, kMaxBlobLen);
+size_t n = std::min<size_t>(len, kMaxBlobLen);
load_trace->arr[i].resize(n);
for (size_t j = 0; j < n; ++j) {
error_code ec = ReadStringObj(&load_trace->arr[i][j].rdb_var);
@@ -1533,7 +1533,7 @@ auto RdbLoaderBase::ReadZSet(int rdbtype) -> io::Result<OpaqueObj> {
double score;
for (size_t i = 0; i < load_trace->arr.size(); ++i) {
-size_t n = std::min(zsetlen, kMaxBlobLen);
+size_t n = std::min<size_t>(zsetlen, kMaxBlobLen);
load_trace->arr[i].resize(n);
for (size_t j = 0; j < n; ++j) {
error_code ec = ReadStringObj(&load_trace->arr[i][j].rdb_var);
@@ -1581,7 +1581,7 @@ auto RdbLoaderBase::ReadListQuicklist(int rdbtype) -> io::Result<OpaqueObj> {
load_trace->arr.resize((len + kMaxBlobLen - 1) / kMaxBlobLen);
for (size_t i = 0; i < load_trace->arr.size(); ++i) {
-size_t n = std::min(len, kMaxBlobLen);
+size_t n = std::min<size_t>(len, kMaxBlobLen);
load_trace->arr[i].resize(n);
for (size_t j = 0; j < n; ++j) {
uint64_t container = QUICKLIST_NODE_CONTAINER_PACKED;


@@ -947,10 +947,12 @@ class RdbSaver::Impl {
// correct closing semantics - channel is closing when K producers marked it as closed.
RdbSaver::Impl::Impl(bool align_writes, unsigned producers_len, CompressionMode compression_mode,
SaveMode sm, io::Sink* sink)
-: sink_(sink), shard_snapshots_(producers_len),
+: sink_(sink),
+  shard_snapshots_(producers_len),
meta_serializer_(CompressionMode::NONE), // Note: I think there is not need for compression
// at all in meta serializer
-channel_{128, producers_len}, compression_mode_(compression_mode) {
+channel_{128, producers_len},
+compression_mode_(compression_mode) {
if (align_writes) {
aligned_buf_.emplace(kBufLen, sink);
sink_ = &aligned_buf_.value();
@@ -1022,7 +1024,7 @@ error_code RdbSaver::Impl::ConsumeChannel(const Cancellation* cll) {
// we can not exit on io-error since we spawn fibers that push data.
// TODO: we may signal them to stop processing and exit asap in case of the error.
-while (record = records_popper.Pop()) {
+while ((record = records_popper.Pop())) {
if (io_error || cll->IsCancelled())
continue;
@@ -1037,7 +1039,7 @@ error_code RdbSaver::Impl::ConsumeChannel(const Cancellation* cll) {
if (io_error) {
break;
}
-} while (record = records_popper.TryPop());
+} while ((record = records_popper.TryPop()));
} // while (records_popper.Pop())
size_t pushed_bytes = 0;

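The doubled parentheses added around `record = ...` above are the conventional way to tell GCC and Clang (via `-Wparentheses`, enabled by `-Wall`) that an assignment used as a loop condition is intentional rather than a mistyped `==`. A small self-contained example:

```cpp
#include <cstdio>

int NextRecord() {
  static int remaining = 3;
  return remaining--;  // yields 3, 2, 1, then 0, which ends the loop
}

int main() {
  int record;
  // With single parentheses, -Wall warns: "suggest parentheses around
  // assignment used as truth value".
  while ((record = NextRecord())) {
    std::printf("record %d\n", record);
  }
  return 0;
}
```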

@@ -146,4 +146,33 @@ class ShardDocIndices {
absl::flat_hash_map<std::string, std::unique_ptr<ShardDocIndex>> indices_;
};
#ifdef __APPLE__
inline ShardDocIndex* ShardDocIndices::GetIndex(std::string_view name) {
return nullptr;
}
inline void ShardDocIndices::InitIndex(const OpArgs& op_args, std::string_view name,
std::shared_ptr<DocIndex> index) {
}
inline bool ShardDocIndices::DropIndex(std::string_view name) {
return false;
}
inline void ShardDocIndices::RebuildAllIndices(const OpArgs& op_args) {
}
inline std::vector<std::string> ShardDocIndices::GetIndexNames() const {
return {};
}
inline void ShardDocIndices::AddDoc(std::string_view key, const DbContext& db_cnt,
const PrimeValue& pv) {
}
inline void ShardDocIndices::RemoveDoc(std::string_view key, const DbContext& db_cnt,
const PrimeValue& pv) {
}
#endif // __APPLE__
} // namespace dfly


@@ -315,7 +315,7 @@ std::optional<cron::cronexpr> InferSnapshotCronExpr() {
if (!snapshot_cron_exp.empty() && !save_time.empty()) {
LOG(ERROR) << "snapshot_cron and save_schedule flags should not be set simultaneously";
-quick_exit(1);
+exit(1);
}
string raw_cron_expr;
@@ -411,10 +411,14 @@ void ServerFamily::Init(util::AcceptServer* acceptor, std::vector<facade::Listen
used_mem_peak.store(sum, memory_order_relaxed);
};
// TODO: to addd support on non-linux platforms as well
#ifdef __linux__
uint32_t cache_hz = max(GetFlag(FLAGS_hz) / 10, 1u);
uint32_t period_ms = max(1u, 1000 / cache_hz);
stats_caching_task_ =
pb_task_->AwaitBrief([&] { return pb_task_->AddPeriodic(period_ms, cache_cb); });
#endif
// check for '--replicaof' before loading anything
if (ReplicaOfFlag flag = GetFlag(FLAGS_replicaof); flag.has_value()) {
@@ -461,8 +465,10 @@ void ServerFamily::Shutdown() {
}
pb_task_->Await([this] {
-pb_task_->CancelPeriodic(stats_caching_task_);
-stats_caching_task_ = 0;
+if (stats_caching_task_) {
+  pb_task_->CancelPeriodic(stats_caching_task_);
+  stats_caching_task_ = 0;
+}
if (journal_->EnterLameDuck()) {
auto ec = journal_->Close();
@@ -1522,6 +1528,7 @@ void ServerFamily::Info(CmdArgList args, ConnectionContext* cntx) {
}
}
#ifndef __APPLE__
if (should_enter("CPU")) {
ADD_HEADER("# CPU");
struct rusage ru, cu, tu;
@@ -1535,6 +1542,7 @@ void ServerFamily::Info(CmdArgList args, ConnectionContext* cntx) {
append("used_cpu_sys_main_thread", StrCat(tu.ru_stime.tv_sec, ".", tu.ru_stime.tv_usec));
append("used_cpu_user_main_thread", StrCat(tu.ru_utime.tv_sec, ".", tu.ru_utime.tv_usec));
}
#endif
if (should_enter("CLUSTER")) {
ADD_HEADER("# Cluster");