1
0
Fork 0
mirror of https://github.com/dragonflydb/dragonfly.git synced 2024-12-14 11:58:02 +00:00

chore(search): Extend FT.INFO (#2133)

* chore(search): Add index definition info to ft.info

* chore(search): Add flags to ft.info

Signed-off-by: Vladislav Oleshko <vlad@dragonflydb.io>

---------

Signed-off-by: Vladislav Oleshko <vlad@dragonflydb.io>
This commit is contained in:
Vladislav 2023-11-06 16:18:13 +03:00 committed by GitHub
parent 1bb27dd448
commit 821884e333
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
5 changed files with 48 additions and 18 deletions

View file

@@ -180,6 +180,8 @@ void ShardDocIndex::Rebuild(const OpArgs& op_args, PMR_NS::memory_resource* mr)
auto cb = [this](string_view key, BaseAccessor* doc) { indices_.Add(key_index_.Add(key), doc); };
TraverseAllMatching(*base_, op_args, cb);
VLOG(1) << "Indexed " << key_index_.Size() << " docs on " << base_->prefix;
}
void ShardDocIndex::AddDoc(string_view key, const DbContext& db_cntx, const PrimeValue& pv) {

View file

@@ -398,20 +398,40 @@ void SearchFamily::FtInfo(CmdArgList args, ConnectionContext* cntx) {
for (const auto& info : infos)
total_num_docs += info.num_docs;
const auto& schema = infos.front().base_index.schema;
const auto& info = infos.front();
const auto& schema = info.base_index.schema;
(*cntx)->StartCollection(3, RedisReplyBuilder::MAP);
(*cntx)->StartCollection(4, RedisReplyBuilder::MAP);
(*cntx)->SendSimpleString("index_name");
(*cntx)->SendSimpleString(idx_name);
(*cntx)->SendSimpleString("fields");
(*cntx)->SendSimpleString("index_definition");
{
(*cntx)->StartCollection(2, RedisReplyBuilder::MAP);
(*cntx)->SendSimpleString("key_type");
(*cntx)->SendSimpleString(info.base_index.type == DocIndex::JSON ? "JSON" : "HASH");
(*cntx)->SendSimpleString("prefix");
(*cntx)->SendSimpleString(info.base_index.prefix);
}
(*cntx)->SendSimpleString("attributes");
(*cntx)->StartArray(schema.fields.size());
for (const auto& [field_ident, field_info] : schema.fields) {
string_view reply[6] = {"identifier", string_view{field_ident},
vector<string> info;
string_view base[] = {"identifier"sv, string_view{field_ident},
"attribute", field_info.short_name,
"type"sv, SearchFieldTypeToString(field_info.type)};
(*cntx)->SendSimpleStrArr(reply);
info.insert(info.end(), base, base + ABSL_ARRAYSIZE(base));
if (field_info.flags & search::SchemaField::NOINDEX)
info.push_back("NOINDEX");
if (field_info.flags & search::SchemaField::SORTABLE)
info.push_back("SORTABLE");
(*cntx)->SendSimpleStrArr(info);
}
(*cntx)->SendSimpleString("num_docs");

View file

@@ -89,9 +89,10 @@ TEST_F(SearchFamilyTest, InfoIndex) {
auto info = Run({"ft.info", "idx-1"});
EXPECT_THAT(
info, RespArray(ElementsAre(_, _, "fields",
RespArray(ElementsAre(RespArray(ElementsAre(
"identifier", "name", "attribute", "name", "type", "TEXT")))),
info, RespArray(ElementsAre(
_, _, _, RespArray(ElementsAre("key_type", "HASH", "prefix", "doc-")), "attributes",
RespArray(ElementsAre(RespArray(
ElementsAre("identifier", "name", "attribute", "name", "type", "TEXT")))),
"num_docs", IntArg(15))));
}

View file

@@ -371,6 +371,7 @@ void RebuildAllSearchIndices(Service* service) {
// On MacOS we don't include search so FT.CREATE won't exist.
return;
}
boost::intrusive_ptr<Transaction> trans{new Transaction{cmd}};
trans->InitByArgs(0, {});
trans->ScheduleSingleHop([](auto* trans, auto* es) {

View file

@@ -100,9 +100,9 @@ def contains_test_data(itype, res, td_indices):
@dfly_args({"proactor_threads": 4})
async def test_management(async_client: aioredis.Redis):
SCHEMA_1 = [TextField("f1"), NumericField("f2")]
SCHEMA_1 = [TextField("f1"), NumericField("f2", sortable=True)]
SCHEMA_2 = [
NumericField("f3"),
NumericField("f3", no_index=True, sortable=True),
TagField("f4"),
VectorField(
"f5",
@@ -129,16 +129,18 @@ async def test_management(async_client: aioredis.Redis):
assert sorted(await async_client.execute_command("FT._LIST")) == ["i1", "i2"]
i1info = await i1.info()
assert i1info["index_definition"] == ["key_type", "HASH", "prefix", "p1"]
assert i1info["num_docs"] == 10
assert sorted(i1info["fields"]) == [
assert sorted(i1info["attributes"]) == [
["identifier", "f1", "attribute", "f1", "type", "TEXT"],
["identifier", "f2", "attribute", "f2", "type", "NUMERIC"],
["identifier", "f2", "attribute", "f2", "type", "NUMERIC", "SORTABLE"],
]
i2info = await i2.info()
assert i2info["index_definition"] == ["key_type", "HASH", "prefix", "p2"]
assert i2info["num_docs"] == 15
assert sorted(i2info["fields"]) == [
["identifier", "f3", "attribute", "f3", "type", "NUMERIC"],
assert sorted(i2info["attributes"]) == [
["identifier", "f3", "attribute", "f3", "type", "NUMERIC", "NOINDEX", "SORTABLE"],
["identifier", "f4", "attribute", "f4", "type", "TAG"],
["identifier", "f5", "attribute", "f5", "type", "VECTOR"],
]
@@ -329,6 +331,7 @@ async def test_multidim_knn(async_client: aioredis.Redis, index_type, algo_type)
await i3.dropindex()
@dfly_args({"proactor_threads": 4})
async def test_knn_score_return(async_client: aioredis.Redis):
i1 = async_client.ft("i1")
vector_field = VectorField(
@@ -438,13 +441,16 @@ async def test_index_persistence(df_server):
def build_fields_set(info):
fields = set()
for field in info["fields"]:
for field in info["attributes"]:
fields.add(tuple(field))
return fields
assert build_fields_set(info_1) == build_fields_set(info_1_new)
assert build_fields_set(info_2) == build_fields_set(info_2_new)
assert info_1["index_definition"] == info_1_new["index_definition"]
assert info_2["index_definition"] == info_2_new["index_definition"]
assert info_1["num_docs"] == info_1_new["num_docs"]
assert info_2["num_docs"] == info_2_new["num_docs"]