Merge branch 'main' into chakaz/takeover-crash

Commit 8f6caa2d06: 100 changed files with 1948 additions and 1975 deletions.
@@ -15,13 +15,13 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: v1.24.0
+version: v1.25.0
 
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
 # It is recommended to use it with quotes.
-appVersion: "v1.24.0"
+appVersion: "v1.25.0"
 
 home: https://dragonflydb.io/
@@ -8,7 +8,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 ---
 # Source: dragonfly/templates/service.yaml
@@ -20,7 +20,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 spec:
   type: ClusterIP
@@ -42,7 +42,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 spec:
   replicas: 1
@@ -72,7 +72,7 @@ spec:
       serviceAccountName: test-dragonfly
       containers:
         - name: dragonfly
-          image: "docker.dragonflydb.io/dragonflydb/dragonfly:v1.24.0"
+          image: "docker.dragonflydb.io/dragonflydb/dragonfly:v1.25.0"
           imagePullPolicy: IfNotPresent
          ports:
            - name: dragonfly
@@ -8,7 +8,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 ---
 # Source: dragonfly/templates/service.yaml
@@ -20,7 +20,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 spec:
   type: ClusterIP
@@ -42,7 +42,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 spec:
   replicas: 1
@@ -60,7 +60,7 @@ spec:
       serviceAccountName: test-dragonfly
      containers:
        - name: dragonfly
-          image: "docker.dragonflydb.io/dragonflydb/dragonfly:v1.24.0"
+          image: "docker.dragonflydb.io/dragonflydb/dragonfly:v1.25.0"
          imagePullPolicy: IfNotPresent
          ports:
            - name: dragonfly
@@ -8,7 +8,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 ---
 # Source: dragonfly/templates/service.yaml
@@ -20,7 +20,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 spec:
   type: ClusterIP
@@ -42,7 +42,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 spec:
   replicas: 1
@@ -67,7 +67,7 @@ spec:
         - image: busybox:latest
           name: sidecar-string
        - name: dragonfly
-          image: "docker.dragonflydb.io/dragonflydb/dragonfly:v1.24.0"
+          image: "docker.dragonflydb.io/dragonflydb/dragonfly:v1.25.0"
          imagePullPolicy: IfNotPresent
          ports:
            - name: dragonfly
@@ -8,7 +8,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 ---
 # Source: dragonfly/templates/service.yaml
@@ -20,7 +20,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 spec:
   type: ClusterIP
@@ -42,7 +42,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 spec:
   replicas: 1
@@ -64,7 +64,7 @@ spec:
           command: ["/bin/sh"]
           args: ["-c", "date; sleep 3600;"]
        - name: dragonfly
-          image: "docker.dragonflydb.io/dragonflydb/dragonfly:v1.24.0"
+          image: "docker.dragonflydb.io/dragonflydb/dragonfly:v1.25.0"
          imagePullPolicy: IfNotPresent
          ports:
            - name: dragonfly
@@ -8,7 +8,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 ---
 # Source: dragonfly/templates/service.yaml
@@ -20,7 +20,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 spec:
   type: ClusterIP
@@ -42,7 +42,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 spec:
   replicas: 1
@@ -60,7 +60,7 @@ spec:
       serviceAccountName: test-dragonfly
      containers:
        - name: dragonfly
-          image: "docker.dragonflydb.io/dragonflydb/dragonfly:v1.24.0"
+          image: "docker.dragonflydb.io/dragonflydb/dragonfly:v1.25.0"
          imagePullPolicy: IfNotPresent
          ports:
            - name: dragonfly
@@ -8,7 +8,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 ---
 # Source: dragonfly/templates/service.yaml
@@ -20,7 +20,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 spec:
   type: ClusterIP
@@ -42,7 +42,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 spec:
   replicas: 1
@@ -68,7 +68,7 @@ spec:
           name: initcontainer-string
      containers:
        - name: dragonfly
-          image: "docker.dragonflydb.io/dragonflydb/dragonfly:v1.24.0"
+          image: "docker.dragonflydb.io/dragonflydb/dragonfly:v1.25.0"
          imagePullPolicy: IfNotPresent
          ports:
            - name: dragonfly
@@ -8,7 +8,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 ---
 # Source: dragonfly/templates/service.yaml
@@ -20,7 +20,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 spec:
   type: ClusterIP
@@ -42,7 +42,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 spec:
   replicas: 1
@@ -65,7 +65,7 @@ spec:
           args: ["-c", "date; sleep 1;"]
      containers:
        - name: dragonfly
-          image: "docker.dragonflydb.io/dragonflydb/dragonfly:v1.24.0"
+          image: "docker.dragonflydb.io/dragonflydb/dragonfly:v1.25.0"
          imagePullPolicy: IfNotPresent
          ports:
            - name: dragonfly
@@ -8,7 +8,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 ---
 # Source: dragonfly/templates/extra-manifests.yaml
@@ -28,7 +28,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 spec:
   type: ClusterIP
@@ -50,7 +50,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 spec:
   replicas: 1
@@ -8,7 +8,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 ---
 # Source: dragonfly/templates/extra-manifests.yaml
@@ -28,7 +28,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 spec:
   type: ClusterIP
@@ -50,7 +50,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 spec:
   replicas: 1
@@ -68,7 +68,7 @@ spec:
       serviceAccountName: test-dragonfly
      containers:
        - name: dragonfly
-          image: "docker.dragonflydb.io/dragonflydb/dragonfly:v1.24.0"
+          image: "docker.dragonflydb.io/dragonflydb/dragonfly:v1.25.0"
          imagePullPolicy: IfNotPresent
          ports:
            - name: dragonfly
@@ -8,7 +8,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 ---
 # Source: dragonfly/templates/extra-manifests.yaml
@@ -28,7 +28,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 spec:
   type: ClusterIP
@@ -50,7 +50,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 spec:
   serviceName: test
@@ -69,7 +69,7 @@ spec:
       serviceAccountName: test-dragonfly
      containers:
        - name: dragonfly
-          image: "docker.dragonflydb.io/dragonflydb/dragonfly:v1.24.0"
+          image: "docker.dragonflydb.io/dragonflydb/dragonfly:v1.25.0"
          imagePullPolicy: IfNotPresent
          ports:
            - name: dragonfly
@@ -8,7 +8,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 ---
 # Source: dragonfly/templates/service.yaml
@@ -20,7 +20,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 spec:
   type: ClusterIP
@@ -42,7 +42,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 spec:
   serviceName: test
@@ -61,7 +61,7 @@ spec:
       serviceAccountName: test-dragonfly
      containers:
        - name: dragonfly
-          image: "docker.dragonflydb.io/dragonflydb/dragonfly:v1.24.0"
+          image: "docker.dragonflydb.io/dragonflydb/dragonfly:v1.25.0"
          imagePullPolicy: IfNotPresent
          ports:
            - name: dragonfly
@@ -17,7 +17,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 ---
 # Source: dragonfly/templates/service.yaml
@@ -29,7 +29,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 spec:
   type: ClusterIP
@@ -51,7 +51,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 spec:
   replicas: 1
@@ -70,7 +70,7 @@ spec:
       serviceAccountName: test-dragonfly
      containers:
        - name: dragonfly
-          image: "docker.dragonflydb.io/dragonflydb/dragonfly:v1.24.0"
+          image: "docker.dragonflydb.io/dragonflydb/dragonfly:v1.25.0"
          imagePullPolicy: IfNotPresent
          ports:
            - name: dragonfly
@@ -8,7 +8,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 ---
 # Source: dragonfly/templates/metrics-service.yaml
@@ -20,7 +20,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
     type: metrics
 spec:
@@ -43,7 +43,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 spec:
   type: ClusterIP
@@ -65,7 +65,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 spec:
   replicas: 1
@@ -83,7 +83,7 @@ spec:
       serviceAccountName: test-dragonfly
      containers:
        - name: dragonfly
-          image: "docker.dragonflydb.io/dragonflydb/dragonfly:v1.24.0"
+          image: "docker.dragonflydb.io/dragonflydb/dragonfly:v1.25.0"
          imagePullPolicy: IfNotPresent
          ports:
            - name: dragonfly
@@ -124,7 +124,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 spec:
   endpoints:
@@ -8,7 +8,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 ---
 # Source: dragonfly/templates/service.yaml
@@ -20,7 +20,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 spec:
   type: ClusterIP
@@ -42,7 +42,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 spec:
   replicas: 1
@@ -60,7 +60,7 @@ spec:
       serviceAccountName: test-dragonfly
      containers:
        - name: dragonfly
-          image: "docker.dragonflydb.io/dragonflydb/dragonfly:v1.24.0"
+          image: "docker.dragonflydb.io/dragonflydb/dragonfly:v1.25.0"
          imagePullPolicy: IfNotPresent
          ports:
            - name: dragonfly
@@ -8,7 +8,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 ---
 # Source: dragonfly/templates/service.yaml
@@ -20,7 +20,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 spec:
   type: ClusterIP
@@ -42,7 +42,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 spec:
   replicas: 1
@@ -64,7 +64,7 @@ spec:
        - name: dragonfly
          securityContext:
            readOnlyRootFilesystem: true
-          image: "docker.dragonflydb.io/dragonflydb/dragonfly:v1.24.0"
+          image: "docker.dragonflydb.io/dragonflydb/dragonfly:v1.25.0"
          imagePullPolicy: IfNotPresent
          ports:
            - name: dragonfly
@@ -8,7 +8,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 ---
 # Source: dragonfly/templates/service.yaml
@@ -20,7 +20,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 spec:
   type: LoadBalancer
@@ -43,7 +43,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 spec:
   replicas: 1
@@ -61,7 +61,7 @@ spec:
       serviceAccountName: test-dragonfly
      containers:
        - name: dragonfly
-          image: "docker.dragonflydb.io/dragonflydb/dragonfly:v1.24.0"
+          image: "docker.dragonflydb.io/dragonflydb/dragonfly:v1.25.0"
          imagePullPolicy: IfNotPresent
          ports:
            - name: dragonfly
@@ -8,7 +8,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 ---
 # Source: dragonfly/templates/metrics-service.yaml
@@ -20,7 +20,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
     type: metrics
 spec:
@@ -43,7 +43,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 spec:
   type: ClusterIP
@@ -65,7 +65,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 spec:
   replicas: 1
@@ -83,7 +83,7 @@ spec:
       serviceAccountName: test-dragonfly
      containers:
        - name: dragonfly
-          image: "docker.dragonflydb.io/dragonflydb/dragonfly:v1.24.0"
+          image: "docker.dragonflydb.io/dragonflydb/dragonfly:v1.25.0"
          imagePullPolicy: IfNotPresent
          ports:
            - name: dragonfly
@@ -125,7 +125,7 @@ metadata:
     release: prometheus-stack
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 spec:
   endpoints:
@@ -8,7 +8,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 ---
 # Source: dragonfly/templates/service.yaml
@@ -20,7 +20,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 spec:
   type: ClusterIP
@@ -42,7 +42,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 spec:
   replicas: 1
@@ -78,7 +78,7 @@ spec:
       serviceAccountName: test-dragonfly
      containers:
        - name: dragonfly
-          image: "docker.dragonflydb.io/dragonflydb/dragonfly:v1.24.0"
+          image: "docker.dragonflydb.io/dragonflydb/dragonfly:v1.25.0"
          imagePullPolicy: IfNotPresent
          ports:
            - name: dragonfly
@@ -8,7 +8,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 ---
 # Source: dragonfly/templates/extra-manifests.yaml
@@ -28,7 +28,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 type: kubernetes.io/tls
 data:
@@ -44,7 +44,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 spec:
   type: ClusterIP
@@ -66,7 +66,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 spec:
   replicas: 1
@@ -77,7 +77,7 @@ spec:
   template:
     metadata:
       annotations:
-        checksum/tls-secret: 9640bcf6cb420c9ce8d883c49b83be32311540f5f550cb664184e5cfda641eb7
+        checksum/tls-secret: 162331fd0ed56c78436aa281647f357d20a6a94960f82c7e99ebbe234b057e72
       labels:
         app.kubernetes.io/name: dragonfly
         app.kubernetes.io/instance: test
@@ -85,7 +85,7 @@ spec:
       serviceAccountName: test-dragonfly
      containers:
        - name: dragonfly
-          image: "docker.dragonflydb.io/dragonflydb/dragonfly:v1.24.0"
+          image: "docker.dragonflydb.io/dragonflydb/dragonfly:v1.25.0"
          imagePullPolicy: IfNotPresent
          ports:
            - name: dragonfly
@@ -8,7 +8,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 ---
 # Source: dragonfly/templates/service.yaml
@@ -20,7 +20,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 spec:
   type: ClusterIP
@@ -42,7 +42,7 @@ metadata:
   labels:
     app.kubernetes.io/name: dragonfly
     app.kubernetes.io/instance: test
-    app.kubernetes.io/version: "v1.24.0"
+    app.kubernetes.io/version: "v1.25.0"
     app.kubernetes.io/managed-by: Helm
 spec:
   replicas: 1
@@ -63,7 +63,7 @@ spec:
       serviceAccountName: test-dragonfly
      containers:
        - name: dragonfly
-          image: "docker.dragonflydb.io/dragonflydb/dragonfly:v1.24.0"
+          image: "docker.dragonflydb.io/dragonflydb/dragonfly:v1.25.0"
          imagePullPolicy: IfNotPresent
          ports:
            - name: dragonfly
@@ -16,7 +16,7 @@ On Debian/Ubuntu:
 
 ```bash
 sudo apt install ninja-build libunwind-dev libboost-fiber-dev libssl-dev \
-     autoconf-archive libtool cmake g++ libzstd-dev bison libxml2-dev
+     autoconf-archive libtool cmake g++ libzstd-dev bison libxml2-dev zlib1g-dev
 ```
 
 On Fedora:
@@ -82,7 +82,8 @@ size_t NodeNegFillLimit(int fill) {
 }
 
 const uint8_t* uint_ptr(string_view sv) {
-  return reinterpret_cast<const uint8_t*>(sv.data());
+  static uint8_t empty = 0;
+  return sv.empty() ? &empty : reinterpret_cast<const uint8_t*>(sv.data());
 }
 
 bool IsLargeElement(size_t sz, int fill) {
@@ -136,10 +137,6 @@ quicklistNode* CreateNode() {
   return node;
 }
 
-uint8_t* LP_FromElem(string_view elem) {
-  return lpPrepend(lpNew(0), uint_ptr(elem), elem.size());
-}
-
 uint8_t* LP_Insert(uint8_t* lp, string_view elem, uint8_t* pos, int lp_where) {
   return lpInsertString(lp, uint_ptr(elem), elem.size(), pos, lp_where, NULL);
 }
@@ -155,15 +152,16 @@ uint8_t* LP_Prepend(uint8_t* lp, string_view elem) {
 quicklistNode* CreateNode(int container, string_view value) {
   quicklistNode* new_node = CreateNode();
   new_node->container = container;
-  new_node->sz = value.size();
-  new_node->count++;
+  new_node->count = 1;
 
   if (container == QUICKLIST_NODE_CONTAINER_PLAIN) {
     DCHECK(!value.empty());
+    new_node->sz = value.size();
     new_node->entry = (uint8_t*)zmalloc(new_node->sz);
     memcpy(new_node->entry, value.data(), new_node->sz);
   } else {
-    new_node->entry = LP_FromElem(value);
+    new_node->entry = LP_Prepend(lpNew(0), value);
     new_node->sz = lpBytes(new_node->entry);
   }
 
   return new_node;
@@ -355,6 +353,40 @@ void QList::Push(string_view value, Where where) {
   }
 }
 
+string QList::Pop(Where where) {
+  DCHECK_GT(count_, 0u);
+  quicklistNode* node;
+  if (where == HEAD) {
+    node = head_;
+  } else {
+    DCHECK_EQ(TAIL, where);
+    node = tail_;
+  }
+
+  /* The head and tail should never be compressed */
+  DCHECK(node->encoding != QUICKLIST_NODE_ENCODING_LZF);
+
+  string res;
+  if (ABSL_PREDICT_FALSE(QL_NODE_IS_PLAIN(node))) {
+    // TODO: We could avoid this copy by returning the pointer of the plain node.
+    // But the higher level APIs should support this.
+    res.assign(reinterpret_cast<char*>(node->entry), node->sz);
+    DelNode(node);
+  } else {
+    uint8_t* pos = where == HEAD ? lpFirst(node->entry) : lpLast(node->entry);
+    unsigned int vlen;
+    long long vlong;
+    uint8_t* vstr = lpGetValue(pos, &vlen, &vlong);
+    if (vstr) {
+      res.assign(reinterpret_cast<char*>(vstr), vlen);
+    } else {
+      res = absl::StrCat(vlong);
+    }
+    DelPackedIndex(node, &pos);
+  }
+  return res;
+}
+
 void QList::AppendListpack(unsigned char* zl) {
   quicklistNode* node = CreateNode();
   node->entry = zl;
@@ -433,13 +465,10 @@ bool QList::PushHead(string_view value) {
   count_++;
 
   if (ABSL_PREDICT_TRUE(NodeAllowInsert(head_, fill_, sz))) {
-    head_->entry = lpPrepend(head_->entry, uint_ptr(value), sz);
+    head_->entry = LP_Prepend(head_->entry, value);
     NodeUpdateSz(head_);
   } else {
-    quicklistNode* node = CreateNode();
-    node->entry = LP_FromElem(value);
-
-    NodeUpdateSz(node);
+    quicklistNode* node = CreateNode(QUICKLIST_NODE_CONTAINER_PACKED, value);
     InsertNode(head_, node, BEFORE);
   }
@@ -458,16 +487,16 @@ bool QList::PushTail(string_view value) {
 
   count_++;
   if (ABSL_PREDICT_TRUE(NodeAllowInsert(orig, fill_, sz))) {
-    orig->entry = lpAppend(orig->entry, uint_ptr(value), sz);
+    orig->entry = LP_Append(orig->entry, value);
     NodeUpdateSz(orig);
-  } else {
-    quicklistNode* node = CreateNode();
-    node->entry = LP_FromElem(value);
-    NodeUpdateSz(node);
-    InsertNode(orig, node, AFTER);
+    orig->count++;
+    return false;
   }
-  tail_->count++;
-  return (orig != tail_);
+
+  quicklistNode* node = CreateNode(QUICKLIST_NODE_CONTAINER_PACKED, value);
+  InsertNode(orig, node, AFTER);
+
+  return true;
 }
 
 void QList::InsertPlainNode(quicklistNode* old_node, string_view value, InsertOpt insert_opt) {
@@ -525,10 +554,9 @@ void QList::Insert(Iterator it, std::string_view elem, InsertOpt insert_opt) {
       InsertPlainNode(tail_, elem, insert_opt);
       return;
     }
-    new_node = CreateNode();
-    new_node->entry = LP_FromElem(elem);
+
+    new_node = CreateNode(QUICKLIST_NODE_CONTAINER_PACKED, elem);
     InsertNode(NULL, new_node, insert_opt);
-    new_node->count++;
     count_++;
     return;
   }
@@ -602,10 +630,7 @@ void QList::Insert(Iterator it, std::string_view elem, InsertOpt insert_opt) {
   } else if (full && ((at_tail && !avail_next && after) || (at_head && !avail_prev && !after))) {
     /* If we are: full, and our prev/next has no available space, then:
      *   - create new node and attach to qlist */
-    new_node = CreateNode();
-    new_node->entry = LP_FromElem(elem);
-    new_node->count++;
-    NodeUpdateSz(new_node);
+    new_node = CreateNode(QUICKLIST_NODE_CONTAINER_PACKED, elem);
     InsertNode(node, new_node, insert_opt);
   } else if (full) {
     /* else, node is full we need to split it. */
@@ -724,7 +749,7 @@ void QList::Compress(quicklistNode* node) {
     reverse = reverse->prev;
   }
 
-  if (!in_depth)
+  if (!in_depth && node)
     CompressNodeIfNeeded(node);
 
   /* At this point, forward and reverse are one node beyond depth */
@@ -988,6 +1013,80 @@ auto QList::Erase(Iterator it) -> Iterator {
   return it;
 }
 
+bool QList::Erase(const long start, unsigned count) {
+  if (count == 0)
+    return false;
+
+  unsigned extent = count; /* range is inclusive of start position */
+
+  if (start >= 0 && extent > (count_ - start)) {
+    /* if requesting delete more elements than exist, limit to list size. */
+    extent = count_ - start;
+  } else if (start < 0 && extent > (unsigned long)(-start)) {
+    /* else, if at negative offset, limit max size to rest of list. */
+    extent = -start; /* c.f. LREM -29 29; just delete until end. */
+  }
+
+  Iterator it = GetIterator(start);
+  quicklistNode* node = it.current_;
+  long offset = it.offset_;
+
+  /* iterate over next nodes until everything is deleted. */
+  while (extent) {
+    quicklistNode* next = node->next;
+
+    unsigned long del;
+    int delete_entire_node = 0;
+    if (offset == 0 && extent >= node->count) {
+      /* If we are deleting more than the count of this node, we
+       * can just delete the entire node without listpack math. */
+      delete_entire_node = 1;
+      del = node->count;
+    } else if (offset >= 0 && extent + offset >= node->count) {
+      /* If deleting more nodes after this one, calculate delete based
+       * on size of current node. */
+      del = node->count - offset;
+    } else if (offset < 0) {
+      /* If offset is negative, we are in the first run of this loop
+       * and we are deleting the entire range
+       * from this start offset to end of list. Since the Negative
+       * offset is the number of elements until the tail of the list,
+       * just use it directly as the deletion count. */
+      del = -offset;
+
+      /* If the positive offset is greater than the remaining extent,
+       * we only delete the remaining extent, not the entire offset.
+       */
+      if (del > extent)
+        del = extent;
+    } else {
+      /* else, we are deleting less than the extent of this node, so
+       * use extent directly. */
+      del = extent;
+    }
+
+    if (delete_entire_node || QL_NODE_IS_PLAIN(node)) {
+      DelNode(node);
+    } else {
+      DecompressNodeIfNeeded(true, node);
+      node->entry = lpDeleteRange(node->entry, offset, del);
+      NodeUpdateSz(node);
+      node->count -= del;
+      count_ -= del;
+      if (node->count == 0) {
+        DelNode(node);
+      } else {
+        RecompressOnly(node);
+      }
+    }
+
+    extent -= del;
+    node = next;
+    offset = 0;
+  }
+  return true;
+}
+
 bool QList::Entry::operator==(std::string_view sv) const {
   if (std::holds_alternative<int64_t>(value_)) {
    char buf[absl::numbers_internal::kFastToBufferSize];
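The hunk above adds a value-returning `QList::Pop`. A minimal caller-side sketch (hedged: `PopRoundTrip` is illustrative and not part of the patch), assuming the `core/qlist.h` header from this repo; note how an integer-encoded listpack entry comes back re-stringified through the `absl::StrCat` branch:

```cpp
#include <string>

#include <absl/strings/str_cat.h>

#include "core/qlist.h"

// Sketch only: Pop() must not be called on an empty list (the DCHECK above),
// so the caller drains via Size().
void PopRoundTrip(dfly::QList* ql) {
  ql->Push("a", dfly::QList::HEAD);
  ql->Push(absl::StrCat(42), dfly::QList::TAIL);  // stored int-encoded in the listpack
  while (ql->Size() > 0) {
    std::string v = ql->Pop(dfly::QList::HEAD);  // "a", then "42"
    (void)v;
  }
}
```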
@ -8,7 +8,8 @@ extern "C" {
|
|||
#include "redis/quicklist.h"
|
||||
}
|
||||
|
||||
#include <functional>
|
||||
#include <absl/functional/function_ref.h>
|
||||
|
||||
#include <optional>
|
||||
#include <string>
|
||||
#include <variant>
|
||||
|
@ -35,6 +36,10 @@ class QList {
|
|||
return std::get<std::string_view>(value_);
|
||||
}
|
||||
|
||||
bool is_int() const {
|
||||
return std::holds_alternative<int64_t>(value_);
|
||||
}
|
||||
|
||||
int64_t ival() const {
|
||||
return std::get<int64_t>(value_);
|
||||
}
|
||||
|
@ -66,7 +71,7 @@ class QList {
|
|||
friend class QList;
|
||||
};
|
||||
|
||||
using IterateFunc = std::function<bool(Entry)>;
|
||||
using IterateFunc = absl::FunctionRef<bool(Entry)>;
|
||||
enum InsertOpt { BEFORE, AFTER };
|
||||
|
||||
QList();
|
||||
|
@ -85,6 +90,10 @@ class QList {
|
|||
void Clear();
|
||||
|
||||
void Push(std::string_view value, Where where);
|
||||
|
||||
// Returns the popped value. Precondition: list is not empty.
|
||||
std::string Pop(Where where);
|
||||
|
||||
void AppendListpack(unsigned char* zl);
|
||||
void AppendPlain(unsigned char* zl, size_t sz);
|
||||
|
||||
|
@ -109,12 +118,29 @@ class QList {
|
|||
// Requires calling subsequent Next() to initialize the iterator.
|
||||
Iterator GetIterator(long idx) const;
|
||||
|
||||
uint32_t noded_count() const {
|
||||
uint32_t node_count() const {
|
||||
return len_;
|
||||
}
|
||||
|
||||
unsigned compress_param() const {
|
||||
return compress_;
|
||||
}
|
||||
|
||||
Iterator Erase(Iterator it);
|
||||
|
||||
// Returns true if elements were deleted, false if list has not changed.
|
||||
// Negative start index is allowed.
|
||||
bool Erase(const long start, unsigned count);
|
||||
|
||||
// Needed by tests and the rdb code.
|
||||
const quicklistNode* Head() const {
|
||||
return head_;
|
||||
}
|
||||
|
||||
const quicklistNode* Tail() const {
|
||||
return tail_;
|
||||
}
|
||||
|
||||
private:
|
||||
bool AllowCompression() const {
|
||||
return compress_ != 0;
|
||||
|
|
|
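The new two-argument `Erase` declared above mirrors the trimming semantics of the Redis quicklist it ports: `start` may be negative (counted from the tail) and `count` is clamped to the remaining length. A hedged sketch of the contract, mirroring the `DelRangeA` test below (the helper name is illustrative):

```cpp
#include "core/qlist.h"

// Illustrative only. With 33 elements pushed, this leaves exactly one:
void TrimExample(dfly::QList* ql) {
  ql->Erase(0, 3);       // drop the first three elements
  ql->Erase(-29, 4000);  // start 29 from the tail; the oversized count is clamped
}
```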
@@ -5,6 +5,7 @@
 #include "core/qlist.h"
 
 #include <absl/strings/str_cat.h>
+#include <absl/strings/str_format.h>
 #include <gmock/gmock.h>
 
 #include "base/gtest.h"
@@ -21,6 +22,100 @@ namespace dfly {
 using namespace std;
 using namespace testing;
 
+static int _ql_verify_compress(const QList& ql) {
+  int errors = 0;
+  unsigned compress_param = ql.compress_param();
+  if (compress_param > 0) {
+    const quicklistNode* node = ql.Head();
+    unsigned int low_raw = compress_param;
+    unsigned int high_raw = ql.node_count() - compress_param;
+
+    for (unsigned int at = 0; at < ql.node_count(); at++, node = node->next) {
+      if (node && (at < low_raw || at >= high_raw)) {
+        if (node->encoding != QUICKLIST_NODE_ENCODING_RAW) {
+          LOG(ERROR) << "Incorrect compression: node " << at << " is compressed at depth "
+                     << compress_param << " ((" << low_raw << "," << high_raw
+                     << " total nodes: " << ql.node_count() << "; size: " << node->sz
+                     << "; recompress: " << node->recompress;
+          errors++;
+        }
+      } else {
+        if (node->encoding != QUICKLIST_NODE_ENCODING_LZF && !node->attempted_compress) {
+          LOG(ERROR) << absl::StrFormat(
+              "Incorrect non-compression: node %d is NOT "
+              "compressed at depth %d ((%u, %u); total "
+              "nodes: %lu; size: %zu; recompress: %d; attempted: %d)",
+              at, compress_param, low_raw, high_raw, ql.node_count(), node->sz, node->recompress,
+              node->attempted_compress);
+          errors++;
+        }
+      }
+    }
+  }
+  return errors;
+}
+
+/* Verify list metadata matches physical list contents. */
+static int ql_verify(const QList& ql, uint32_t nc, uint32_t count, uint32_t head_count,
+                     uint32_t tail_count) {
+  int errors = 0;
+
+  if (nc != ql.node_count()) {
+    LOG(ERROR) << "quicklist length wrong: expected " << nc << " got " << ql.node_count();
+    errors++;
+  }
+
+  if (count != ql.Size()) {
+    LOG(ERROR) << "quicklist count wrong: expected " << count << " got " << ql.Size();
+    errors++;
+  }
+
+  auto* node = ql.Head();
+  size_t node_size = 0;
+  while (node) {
+    node_size += node->count;
+    node = node->next;
+  }
+
+  if (node_size != ql.Size()) {
+    LOG(ERROR) << "quicklist cached count not match actual count: expected " << ql.Size()
+               << " got " << node_size;
+    errors++;
+  }
+
+  node = ql.Tail();
+  node_size = 0;
+  while (node) {
+    node_size += node->count;
+    node = node->prev;
+  }
+  if (node_size != ql.Size()) {
+    LOG(ERROR) << "has different forward count than reverse count! "
+                  "Forward count is "
+               << ql.Size() << ", reverse count is " << node_size;
+    errors++;
+  }
+
+  if (ql.node_count() == 0 && errors == 0) {
+    return 0;
+  }
+
+  if (ql.Head() && head_count != ql.Head()->count && head_count != lpLength(ql.Head()->entry)) {
+    LOG(ERROR) << absl::StrFormat("head count wrong: expected %u got cached %u vs. actual %lu",
+                                  head_count, ql.Head()->count, lpLength(ql.Head()->entry));
+    errors++;
+  }
+
+  if (ql.Tail() && tail_count != ql.Tail()->count && tail_count != lpLength(ql.Tail()->entry)) {
+    LOG(ERROR) << "tail count wrong: expected " << tail_count << " got cached " << ql.Tail()->count
+               << " vs. actual " << lpLength(ql.Tail()->entry);
+    errors++;
+  }
+
+  errors += _ql_verify_compress(ql);
+  return errors;
+}
+
 class QListTest : public ::testing::Test {
  protected:
   QListTest() : mr_(mi_heap_get_backing()) {
@@ -183,4 +278,100 @@ TEST_P(OptionsTest, Numbers) {
   EXPECT_EQ("xxxxxxxxxxxxxxxxxxxx", it.Get().view());
 }
 
-};  // namespace dfly
+TEST_P(OptionsTest, DelRangeA) {
+  auto [fill, compress] = GetParam();
+  ql_ = QList(fill, compress);
+  long long nums[5000];
+  for (int i = 0; i < 33; i++) {
+    nums[i] = -5157318210846258176 + i;
+    ql_.Push(absl::StrCat(nums[i]), QList::TAIL);
+  }
+  if (fill == 32)
+    ql_verify(ql_, 2, 33, 32, 1);
+
+  /* ltrim 3 3 (keep [3,3] inclusive = 1 remaining) */
+  ql_.Erase(0, 3);
+  ql_.Erase(-29, 4000); /* make sure not loop forever */
+  if (fill == 32)
+    ql_verify(ql_, 1, 1, 1, 1);
+
+  auto it = ql_.GetIterator(0);
+  ASSERT_TRUE(it.Next());
+  EXPECT_EQ(-5157318210846258173, it.Get().ival());
+}
+
+TEST_P(OptionsTest, DelRangeB) {
+  auto [fill, _] = GetParam();
+  ql_ = QList(fill, QUICKLIST_NOCOMPRESS);  // ignore compress parameter
+
+  long long nums[5000];
+  for (int i = 0; i < 33; i++) {
+    nums[i] = i;
+    ql_.Push(absl::StrCat(nums[i]), QList::TAIL);
+  }
+  if (fill == 32)
+    ql_verify(ql_, 2, 33, 32, 1);
+
+  /* ltrim 5 16 (keep [5,16] inclusive = 12 remaining) */
+  ql_.Erase(0, 5);
+  ql_.Erase(-16, 16);
+  if (fill == 32)
+    ql_verify(ql_, 1, 12, 12, 12);
+
+  auto it = ql_.GetIterator(0);
+  ASSERT_TRUE(it.Next());
+  EXPECT_EQ(5, it.Get().ival());
+
+  it = ql_.GetIterator(-1);
+  ASSERT_TRUE(it.Next());
+  EXPECT_EQ(16, it.Get().ival());
+
+  ql_.Push("bobobob", QList::TAIL);
+  it = ql_.GetIterator(-1);
+  ASSERT_TRUE(it.Next());
+  EXPECT_EQ("bobobob", it.Get().view());
+
+  for (int i = 0; i < 12; i++) {
+    it = ql_.GetIterator(i);
+    ASSERT_TRUE(it.Next());
+    EXPECT_EQ(i + 5, it.Get().ival());
+  }
+}
+
+TEST_P(OptionsTest, DelRangeC) {
+  auto [fill, compress] = GetParam();
+  ql_ = QList(fill, compress);
+  long long nums[5000];
+  for (int i = 0; i < 33; i++) {
+    nums[i] = -5157318210846258176 + i;
+    ql_.Push(absl::StrCat(nums[i]), QList::TAIL);
+  }
+  if (fill == 32)
+    ql_verify(ql_, 2, 33, 32, 1);
+
+  /* ltrim 3 3 (keep [3,3] inclusive = 1 remaining) */
+  ql_.Erase(0, 3);
+  ql_.Erase(-29, 4000); /* make sure not loop forever */
+  if (fill == 32)
+    ql_verify(ql_, 1, 1, 1, 1);
+  auto it = ql_.GetIterator(0);
+  ASSERT_TRUE(it.Next());
+  ASSERT_EQ(-5157318210846258173, it.Get().ival());
+}
+
+TEST_P(OptionsTest, DelRangeD) {
+  auto [fill, compress] = GetParam();
+  ql_ = QList(fill, compress);
+  long long nums[5000];
+  for (int i = 0; i < 33; i++) {
+    nums[i] = -5157318210846258176 + i;
+    ql_.Push(absl::StrCat(nums[i]), QList::TAIL);
+  }
+  if (fill == 32)
+    ql_verify(ql_, 2, 33, 32, 1);
+  ql_.Erase(-12, 3);
+
+  ASSERT_EQ(30, ql_.Size());
+}
+
+}  // namespace dfly
@@ -4,6 +4,8 @@
 
 #include "core/search/base.h"
 
+#include <absl/strings/numbers.h>
+
 namespace dfly::search {
 
 std::string_view QueryParams::operator[](std::string_view name) const {
@@ -37,4 +39,11 @@ WrappedStrPtr::operator std::string_view() const {
   return std::string_view{ptr.get(), std::strlen(ptr.get())};
 }
 
+std::optional<double> ParseNumericField(std::string_view value) {
+  double value_as_double;
+  if (absl::SimpleAtod(value, &value_as_double))
+    return value_as_double;
+  return std::nullopt;
+}
+
 }  // namespace dfly::search
@ -68,11 +68,18 @@ using SortableValue = std::variant<std::monostate, double, std::string>;
|
|||
struct DocumentAccessor {
|
||||
using VectorInfo = search::OwnedFtVector;
|
||||
using StringList = absl::InlinedVector<std::string_view, 1>;
|
||||
using NumsList = absl::InlinedVector<double, 1>;
|
||||
|
||||
virtual ~DocumentAccessor() = default;
|
||||
|
||||
virtual StringList GetStrings(std::string_view active_field) const = 0;
|
||||
virtual VectorInfo GetVector(std::string_view active_field) const = 0;
|
||||
/* Returns nullopt if the specified field is not a list of strings */
|
||||
virtual std::optional<StringList> GetStrings(std::string_view active_field) const = 0;
|
||||
|
||||
/* Returns nullopt if the specified field is not a vector */
|
||||
virtual std::optional<VectorInfo> GetVector(std::string_view active_field) const = 0;
|
||||
|
||||
/* Return nullopt if the specified field is not a list of doubles */
|
||||
virtual std::optional<NumsList> GetNumbers(std::string_view active_field) const = 0;
|
||||
};
|
||||
|
||||
// Base class for type-specific indices.
|
||||
|
@ -81,8 +88,10 @@ struct DocumentAccessor {
|
|||
// query functions. All results for all index types should be sorted.
|
||||
struct BaseIndex {
|
||||
virtual ~BaseIndex() = default;
|
||||
virtual void Add(DocId id, DocumentAccessor* doc, std::string_view field) = 0;
|
||||
virtual void Remove(DocId id, DocumentAccessor* doc, std::string_view field) = 0;
|
||||
|
||||
// Returns true if the document was added / indexed
|
||||
virtual bool Add(DocId id, const DocumentAccessor& doc, std::string_view field) = 0;
|
||||
virtual void Remove(DocId id, const DocumentAccessor& doc, std::string_view field) = 0;
|
||||
};
|
||||
|
||||
// Base class for type-specific sorting indices.
|
||||
|
@ -91,4 +100,20 @@ struct BaseSortIndex : BaseIndex {
|
|||
virtual std::vector<ResultScore> Sort(std::vector<DocId>* ids, size_t limit, bool desc) const = 0;
|
||||
};
|
||||
|
||||
/* Used for converting field values to double. Returns std::nullopt if the conversion fails */
|
||||
std::optional<double> ParseNumericField(std::string_view value);
|
||||
|
||||
/* Temporary method to create an empty std::optional<InlinedVector> in DocumentAccessor::GetString
|
||||
and DocumentAccessor::GetNumbers methods. The problem is that due to internal implementation
|
||||
details of absl::InlineVector, we are getting a -Wmaybe-uninitialized compiler warning. To
|
||||
suppress this false warning, we temporarily disable it around this block of code using GCC
|
||||
diagnostic directives. */
|
||||
template <typename InlinedVector> std::optional<InlinedVector> EmptyAccessResult() {
|
||||
// GCC 13.1 throws spurious warnings around this code.
|
||||
#pragma GCC diagnostic push
|
||||
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
|
||||
return InlinedVector{};
|
||||
#pragma GCC diagnostic pop
|
||||
}
|
||||
|
||||
} // namespace dfly::search
|
||||
|
|
|
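The accessor contract change above (plain values to `std::optional`) lets an index distinguish "field is malformed, do not index the document" from "field has no values". A hedged sketch of an implementing accessor; `MapAccessor` and `fields_` are hypothetical names for illustration, not from the patch:

```cpp
#include <optional>
#include <string>
#include <string_view>

#include <absl/container/flat_hash_map.h>

#include "core/search/base.h"

// Hypothetical accessor over a flat string map. Per the new contract: nullopt
// means "field is not indexable", an engaged empty list means "no values".
struct MapAccessor : dfly::search::DocumentAccessor {
  std::optional<StringList> GetStrings(std::string_view field) const override {
    auto it = fields_.find(field);
    if (it == fields_.end())
      return dfly::search::EmptyAccessResult<StringList>();  // missing, but valid
    return StringList{std::string_view{it->second}};
  }

  std::optional<NumsList> GetNumbers(std::string_view field) const override {
    auto strings = GetStrings(field);
    if (!strings)
      return std::nullopt;
    NumsList out;
    for (std::string_view s : *strings) {
      auto num = dfly::search::ParseNumericField(s);
      if (!num)
        return std::nullopt;  // malformed number: reject the whole field
      out.push_back(*num);
    }
    return out;
  }

  std::optional<VectorInfo> GetVector(std::string_view) const override {
    return VectorInfo{};  // no vector fields in this sketch
  }

  absl::flat_hash_map<std::string, std::string> fields_;
};
```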
@ -71,19 +71,22 @@ absl::flat_hash_set<string> NormalizeTags(string_view taglist, bool case_sensiti
|
|||
NumericIndex::NumericIndex(PMR_NS::memory_resource* mr) : entries_{mr} {
|
||||
}
|
||||
|
||||
void NumericIndex::Add(DocId id, DocumentAccessor* doc, string_view field) {
|
||||
for (auto str : doc->GetStrings(field)) {
|
||||
double num;
|
||||
if (absl::SimpleAtod(str, &num))
|
||||
entries_.emplace(num, id);
|
||||
bool NumericIndex::Add(DocId id, const DocumentAccessor& doc, string_view field) {
|
||||
auto numbers = doc.GetNumbers(field);
|
||||
if (!numbers) {
|
||||
return false;
|
||||
}
|
||||
|
||||
for (auto num : numbers.value()) {
|
||||
entries_.emplace(num, id);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
void NumericIndex::Remove(DocId id, DocumentAccessor* doc, string_view field) {
|
||||
for (auto str : doc->GetStrings(field)) {
|
||||
double num;
|
||||
if (absl::SimpleAtod(str, &num))
|
||||
entries_.erase({num, id});
|
||||
void NumericIndex::Remove(DocId id, const DocumentAccessor& doc, string_view field) {
|
||||
auto numbers = doc.GetNumbers(field).value();
|
||||
for (auto num : numbers) {
|
||||
entries_.erase({num, id});
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -139,19 +142,27 @@ typename BaseStringIndex<C>::Container* BaseStringIndex<C>::GetOrCreate(string_v
|
|||
}
|
||||
|
||||
template <typename C>
|
||||
void BaseStringIndex<C>::Add(DocId id, DocumentAccessor* doc, string_view field) {
|
||||
bool BaseStringIndex<C>::Add(DocId id, const DocumentAccessor& doc, string_view field) {
|
||||
auto strings_list = doc.GetStrings(field);
|
||||
if (!strings_list) {
|
||||
return false;
|
||||
}
|
||||
|
||||
absl::flat_hash_set<std::string> tokens;
|
||||
for (string_view str : doc->GetStrings(field))
|
||||
for (string_view str : strings_list.value())
|
||||
tokens.merge(Tokenize(str));
|
||||
|
||||
for (string_view token : tokens)
|
||||
GetOrCreate(token)->Insert(id);
|
||||
return true;
|
||||
}
|
||||
|
||||
template <typename C>
|
||||
void BaseStringIndex<C>::Remove(DocId id, DocumentAccessor* doc, string_view field) {
|
||||
void BaseStringIndex<C>::Remove(DocId id, const DocumentAccessor& doc, string_view field) {
|
||||
auto strings_list = doc.GetStrings(field).value();
|
||||
|
||||
absl::flat_hash_set<std::string> tokens;
|
||||
for (string_view str : doc->GetStrings(field))
|
||||
for (string_view str : strings_list)
|
||||
tokens.merge(Tokenize(str));
|
||||
|
||||
for (const auto& token : tokens) {
|
||||
|
@ -192,6 +203,20 @@ std::pair<size_t /*dim*/, VectorSimilarity> BaseVectorIndex::Info() const {
|
|||
return {dim_, sim_};
|
||||
}
|
||||
|
||||
bool BaseVectorIndex::Add(DocId id, const DocumentAccessor& doc, std::string_view field) {
|
||||
auto vector = doc.GetVector(field);
|
||||
if (!vector)
|
||||
return false;
|
||||
|
||||
auto& [ptr, size] = vector.value();
|
||||
if (ptr && size != dim_) {
|
||||
return false;
|
||||
}
|
||||
|
||||
AddVector(id, ptr);
|
||||
return true;
|
||||
}
|
||||
|
||||
FlatVectorIndex::FlatVectorIndex(const SchemaField::VectorParams& params,
|
||||
PMR_NS::memory_resource* mr)
|
||||
: BaseVectorIndex{params.dim, params.sim}, entries_{mr} {
|
||||
|
@ -199,19 +224,18 @@ FlatVectorIndex::FlatVectorIndex(const SchemaField::VectorParams& params,
|
|||
entries_.reserve(params.capacity * params.dim);
|
||||
}
|
||||
|
||||
void FlatVectorIndex::Add(DocId id, DocumentAccessor* doc, string_view field) {
|
||||
void FlatVectorIndex::AddVector(DocId id, const VectorPtr& vector) {
|
||||
DCHECK_LE(id * dim_, entries_.size());
|
||||
if (id * dim_ == entries_.size())
|
||||
entries_.resize((id + 1) * dim_);
|
||||
|
||||
// TODO: Let get vector write to buf itself
|
||||
auto [ptr, size] = doc->GetVector(field);
|
||||
|
||||
if (size == dim_)
|
||||
memcpy(&entries_[id * dim_], ptr.get(), dim_ * sizeof(float));
|
||||
if (vector) {
|
||||
memcpy(&entries_[id * dim_], vector.get(), dim_ * sizeof(float));
|
||||
}
|
||||
}
|
||||
|
||||
void FlatVectorIndex::Remove(DocId id, DocumentAccessor* doc, string_view field) {
|
||||
void FlatVectorIndex::Remove(DocId id, const DocumentAccessor& doc, string_view field) {
|
||||
// noop
|
||||
}
|
||||
|
||||
|
@ -229,7 +253,7 @@ struct HnswlibAdapter {
|
|||
100 /* seed*/} {
|
||||
}
|
||||
|
||||
void Add(float* data, DocId id) {
|
||||
void Add(const float* data, DocId id) {
|
||||
if (world_.cur_element_count + 1 >= world_.max_elements_)
|
||||
world_.resizeIndex(world_.cur_element_count * 2);
|
||||
world_.addPoint(data, id);
|
||||
|
@ -298,10 +322,10 @@ HnswVectorIndex::HnswVectorIndex(const SchemaField::VectorParams& params, PMR_NS
|
|||
HnswVectorIndex::~HnswVectorIndex() {
|
||||
}
|
||||
|
||||
void HnswVectorIndex::Add(DocId id, DocumentAccessor* doc, string_view field) {
|
||||
auto [ptr, size] = doc->GetVector(field);
|
||||
if (size == dim_)
|
||||
adapter_->Add(ptr.get(), id);
|
||||
void HnswVectorIndex::AddVector(DocId id, const VectorPtr& vector) {
|
||||
if (vector) {
|
||||
adapter_->Add(vector.get(), id);
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<std::pair<float, DocId>> HnswVectorIndex::Knn(float* target, size_t k,
|
||||
|
@ -314,7 +338,7 @@ std::vector<std::pair<float, DocId>> HnswVectorIndex::Knn(float* target, size_t
|
|||
return adapter_->Knn(target, k, ef, allowed);
|
||||
}
|
||||
|
||||
void HnswVectorIndex::Remove(DocId id, DocumentAccessor* doc, string_view field) {
|
||||
void HnswVectorIndex::Remove(DocId id, const DocumentAccessor& doc, string_view field) {
|
||||
adapter_->Remove(id);
|
||||
}
|
||||
|
||||
|
|
|
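With `BaseVectorIndex::Add` now validating the fetched vector once (missing field, wrong dimension) and delegating storage to a virtual `AddVector`, new backends only implement the storage step, template-method style; the `final` override appears in the header diff that follows. A hedged illustration; `NullVectorIndex` is a hypothetical stub, not part of the patch:

```cpp
#include <string_view>

#include "core/search/indices.h"

// Hypothetical no-op backend: validation is inherited from BaseVectorIndex::Add,
// so only the storage hooks need overriding.
struct NullVectorIndex : dfly::search::BaseVectorIndex {
  NullVectorIndex(size_t dim, dfly::search::VectorSimilarity sim) : BaseVectorIndex{dim, sim} {
  }

  void Remove(dfly::search::DocId, const dfly::search::DocumentAccessor&,
              std::string_view) override {
  }

 protected:
  void AddVector(dfly::search::DocId, const VectorPtr&) override {
  }  // drop the data: stub storage
};
```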
@@ -28,8 +28,8 @@ namespace dfly::search {
 struct NumericIndex : public BaseIndex {
   explicit NumericIndex(PMR_NS::memory_resource* mr);
 
-  void Add(DocId id, DocumentAccessor* doc, std::string_view field) override;
-  void Remove(DocId id, DocumentAccessor* doc, std::string_view field) override;
+  bool Add(DocId id, const DocumentAccessor& doc, std::string_view field) override;
+  void Remove(DocId id, const DocumentAccessor& doc, std::string_view field) override;
 
   std::vector<DocId> Range(double l, double r) const;
@@ -44,8 +44,8 @@ template <typename C> struct BaseStringIndex : public BaseIndex {
 
   BaseStringIndex(PMR_NS::memory_resource* mr, bool case_sensitive);
 
-  void Add(DocId id, DocumentAccessor* doc, std::string_view field) override;
-  void Remove(DocId id, DocumentAccessor* doc, std::string_view field) override;
+  bool Add(DocId id, const DocumentAccessor& doc, std::string_view field) override;
+  void Remove(DocId id, const DocumentAccessor& doc, std::string_view field) override;
 
   // Used by Add & Remove to tokenize text value
   virtual absl::flat_hash_set<std::string> Tokenize(std::string_view value) const = 0;
@@ -53,7 +53,7 @@ template <typename C> struct BaseStringIndex : public BaseIndex {
   // Pointer is valid as long as index is not mutated. Nullptr if not found
   const Container* Matching(std::string_view str) const;
 
-  // Iterate over all Machting on prefix.
+  // Iterate over all Matching on prefix.
   void MatchingPrefix(std::string_view prefix, absl::FunctionRef<void(const Container*)> cb) const;
 
   // Returns all the terms that appear as keys in the reverse index.
@@ -97,9 +97,14 @@ struct TagIndex : public BaseStringIndex<SortedVector> {
 struct BaseVectorIndex : public BaseIndex {
   std::pair<size_t /*dim*/, VectorSimilarity> Info() const;
 
+  bool Add(DocId id, const DocumentAccessor& doc, std::string_view field) override final;
+
  protected:
   BaseVectorIndex(size_t dim, VectorSimilarity sim);
 
+  using VectorPtr = decltype(std::declval<OwnedFtVector>().first);
+  virtual void AddVector(DocId id, const VectorPtr& vector) = 0;
+
   size_t dim_;
   VectorSimilarity sim_;
 };
@@ -109,11 +114,13 @@ struct BaseVectorIndex : public BaseIndex {
 struct FlatVectorIndex : public BaseVectorIndex {
   FlatVectorIndex(const SchemaField::VectorParams& params, PMR_NS::memory_resource* mr);
 
-  void Add(DocId id, DocumentAccessor* doc, std::string_view field) override;
-  void Remove(DocId id, DocumentAccessor* doc, std::string_view field) override;
+  void Remove(DocId id, const DocumentAccessor& doc, std::string_view field) override;
 
   const float* Get(DocId doc) const;
 
+ protected:
+  void AddVector(DocId id, const VectorPtr& vector) override;
+
  private:
   PMR_NS::vector<float> entries_;
 };
@@ -124,13 +131,15 @@ struct HnswVectorIndex : public BaseVectorIndex {
   HnswVectorIndex(const SchemaField::VectorParams& params, PMR_NS::memory_resource* mr);
   ~HnswVectorIndex();
 
-  void Add(DocId id, DocumentAccessor* doc, std::string_view field) override;
-  void Remove(DocId id, DocumentAccessor* doc, std::string_view field) override;
+  void Remove(DocId id, const DocumentAccessor& doc, std::string_view field) override;
 
   std::vector<std::pair<float, DocId>> Knn(float* target, size_t k, std::optional<size_t> ef) const;
   std::vector<std::pair<float, DocId>> Knn(float* target, size_t k, std::optional<size_t> ef,
                                            const std::vector<DocId>& allowed) const;
+
+ protected:
+  void AddVector(DocId id, const VectorPtr& vector) override;
+
  private:
   std::unique_ptr<HnswlibAdapter> adapter_;
 };
@ -571,23 +571,48 @@ void FieldIndices::CreateSortIndices(PMR_NS::memory_resource* mr) {
|
|||
}
|
||||
}
|
||||
|
||||
void FieldIndices::Add(DocId doc, DocumentAccessor* access) {
|
||||
for (auto& [field, index] : indices_)
|
||||
index->Add(doc, access, field);
|
||||
for (auto& [field, sort_index] : sort_indices_)
|
||||
sort_index->Add(doc, access, field);
|
||||
bool FieldIndices::Add(DocId doc, const DocumentAccessor& access) {
|
||||
bool was_added = true;
|
||||
|
||||
std::vector<std::pair<std::string_view, BaseIndex*>> successfully_added_indices;
|
||||
successfully_added_indices.reserve(indices_.size() + sort_indices_.size());
|
||||
|
||||
auto try_add = [&](const auto& indices_container) {
|
||||
for (auto& [field, index] : indices_container) {
|
||||
if (index->Add(doc, access, field)) {
|
||||
successfully_added_indices.emplace_back(field, index.get());
|
||||
} else {
|
||||
was_added = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
try_add(indices_);
|
||||
|
||||
if (was_added) {
|
||||
try_add(sort_indices_);
|
||||
}
|
||||
|
||||
if (!was_added) {
|
||||
for (auto& [field, index] : successfully_added_indices) {
|
||||
index->Remove(doc, access, field);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
all_ids_.insert(upper_bound(all_ids_.begin(), all_ids_.end(), doc), doc);
|
||||
return true;
|
||||
}
|
||||
|
||||
void FieldIndices::Remove(DocId doc, DocumentAccessor* access) {
|
||||
void FieldIndices::Remove(DocId doc, const DocumentAccessor& access) {
|
||||
for (auto& [field, index] : indices_)
|
||||
index->Remove(doc, access, field);
|
||||
for (auto& [field, sort_index] : sort_indices_)
|
||||
sort_index->Remove(doc, access, field);
|
||||
|
||||
auto it = lower_bound(all_ids_.begin(), all_ids_.end(), doc);
|
||||
CHECK(it != all_ids_.end() && *it == doc);
|
||||
DCHECK(it != all_ids_.end() && *it == doc);
|
||||
all_ids_.erase(it);
|
||||
}
|
||||
|
||||
|
|
|
@ -77,8 +77,9 @@ class FieldIndices {
|
|||
// Create indices based on schema and options. Both must outlive the indices
|
||||
FieldIndices(const Schema& schema, const IndicesOptions& options, PMR_NS::memory_resource* mr);
|
||||
|
||||
void Add(DocId doc, DocumentAccessor* access);
|
||||
void Remove(DocId doc, DocumentAccessor* access);
|
||||
// Returns true if document was added
|
||||
bool Add(DocId doc, const DocumentAccessor& access);
|
||||
void Remove(DocId doc, const DocumentAccessor& access);
|
||||
|
||||
BaseIndex* GetIndex(std::string_view field) const;
|
||||
BaseSortIndex* GetSortIndex(std::string_view field) const;
|
||||
|
|
|
@ -44,13 +44,36 @@ struct MockedDocument : public DocumentAccessor {
|
|||
MockedDocument(std::string test_field) : fields_{{"field", test_field}} {
|
||||
}
|
||||
|
||||
StringList GetStrings(string_view field) const override {
|
||||
std::optional<StringList> GetStrings(string_view field) const override {
|
||||
auto it = fields_.find(field);
|
||||
return {it != fields_.end() ? string_view{it->second} : ""};
|
||||
if (it == fields_.end()) {
|
||||
return EmptyAccessResult<StringList>();
|
||||
}
|
||||
return StringList{string_view{it->second}};
|
||||
}
|
||||
|
||||
VectorInfo GetVector(string_view field) const override {
|
||||
return BytesToFtVector(GetStrings(field).front());
|
||||
std::optional<VectorInfo> GetVector(string_view field) const override {
|
||||
auto strings_list = GetStrings(field);
|
||||
if (!strings_list)
|
||||
return std::nullopt;
|
||||
return !strings_list->empty() ? BytesToFtVectorSafe(strings_list->front()) : VectorInfo{};
|
||||
}
|
||||
|
||||
std::optional<NumsList> GetNumbers(std::string_view field) const override {
|
||||
auto strings_list = GetStrings(field);
|
||||
if (!strings_list)
|
||||
return std::nullopt;
|
||||
|
||||
NumsList nums_list;
|
||||
nums_list.reserve(strings_list->size());
|
||||
for (auto str : strings_list.value()) {
|
||||
auto num = ParseNumericField(str);
|
||||
if (!num) {
|
||||
return std::nullopt;
|
||||
}
|
||||
nums_list.push_back(num.value());
|
||||
}
|
||||
return nums_list;
|
||||
}
|
||||
|
||||
string DebugFormat() {
|
||||
|
@ -121,7 +144,7 @@ class SearchTest : public ::testing::Test {
|
|||
|
||||
shuffle(entries_.begin(), entries_.end(), default_random_engine{});
|
||||
for (DocId i = 0; i < entries_.size(); i++)
|
||||
index.Add(i, &entries_[i].first);
|
||||
index.Add(i, entries_[i].first);
|
||||
|
||||
SearchAlgorithm search_algo{};
|
||||
if (!search_algo.Init(query_, ¶ms_)) {
|
||||
|
@ -430,7 +453,7 @@ TEST_F(SearchTest, StopWords) {
|
|||
"explicitly found!"};
|
||||
for (size_t i = 0; i < documents.size(); i++) {
|
||||
MockedDocument doc{{{"title", documents[i]}}};
|
||||
indices.Add(i, &doc);
|
||||
indices.Add(i, doc);
|
||||
}
|
||||
|
||||
// words is a stopword
|
||||
|
@ -484,7 +507,7 @@ TEST_P(KnnTest, Simple1D) {
|
|||
for (size_t i = 0; i < 100; i++) {
|
||||
Map values{{{"even", i % 2 == 0 ? "YES" : "NO"}, {"pos", ToBytes({float(i)})}}};
|
||||
MockedDocument doc{values};
|
||||
indices.Add(i, &doc);
|
||||
indices.Add(i, doc);
|
||||
}
|
||||
|
||||
SearchAlgorithm algo{};
|
||||
|
@ -540,7 +563,7 @@ TEST_P(KnnTest, Simple2D) {
|
|||
for (size_t i = 0; i < ABSL_ARRAYSIZE(kTestCoords); i++) {
|
||||
string coords = ToBytes({kTestCoords[i].first, kTestCoords[i].second});
|
||||
MockedDocument doc{Map{{"pos", coords}}};
|
||||
indices.Add(i, &doc);
|
||||
indices.Add(i, doc);
|
||||
}
|
||||
|
||||
SearchAlgorithm algo{};
|
||||
|
@ -602,7 +625,7 @@ TEST_P(KnnTest, Cosine) {
|
|||
for (size_t i = 0; i < ABSL_ARRAYSIZE(kTestCoords); i++) {
|
||||
string coords = ToBytes({kTestCoords[i].first, kTestCoords[i].second});
|
||||
MockedDocument doc{Map{{"pos", coords}}};
|
||||
indices.Add(i, &doc);
|
||||
indices.Add(i, doc);
|
||||
}
|
||||
|
||||
SearchAlgorithm algo{};
|
||||
|
@ -646,7 +669,7 @@ TEST_P(KnnTest, AddRemove) {
|
|||
vector<MockedDocument> documents(10);
|
||||
for (size_t i = 0; i < 10; i++) {
|
||||
documents[i] = Map{{"pos", ToBytes({float(i)})}};
|
||||
indices.Add(i, &documents[i]);
|
||||
indices.Add(i, documents[i]);
|
||||
}
|
||||
|
||||
SearchAlgorithm algo{};
|
||||
|
@ -661,7 +684,7 @@ TEST_P(KnnTest, AddRemove) {
|
|||
|
||||
// delete leftmost 5
|
||||
for (size_t i = 0; i < 5; i++)
|
||||
indices.Remove(i, &documents[i]);
|
||||
indices.Remove(i, documents[i]);
|
||||
|
||||
// search leftmost 5 again
|
||||
{
|
||||
|
@ -672,7 +695,7 @@ TEST_P(KnnTest, AddRemove) {
|
|||
|
||||
// add removed elements
|
||||
for (size_t i = 0; i < 5; i++)
|
||||
indices.Add(i, &documents[i]);
|
||||
indices.Add(i, documents[i]);
|
||||
|
||||
// repeat first search
|
||||
{
|
||||
|
@ -693,7 +716,7 @@ TEST_P(KnnTest, AutoResize) {
|
|||
|
||||
for (size_t i = 0; i < 100; i++) {
|
||||
MockedDocument doc{Map{{"pos", ToBytes({float(i)})}}};
|
||||
indices.Add(i, &doc);
|
||||
indices.Add(i, doc);
|
||||
}
|
||||
|
||||
EXPECT_EQ(indices.GetAllDocs().size(), 100);
|
||||
|
@ -720,7 +743,7 @@ static void BM_VectorSearch(benchmark::State& state) {
|
|||
for (size_t i = 0; i < nvecs; i++) {
|
||||
auto rv = random_vec();
|
||||
MockedDocument doc{Map{{"pos", ToBytes(rv)}}};
|
||||
indices.Add(i, &doc);
|
||||
indices.Add(i, doc);
|
||||
}
|
||||
|
||||
SearchAlgorithm algo{};
|
||||
|
|
|
@ -46,15 +46,23 @@ std::vector<ResultScore> SimpleValueSortIndex<T>::Sort(std::vector<DocId>* ids,
|
|||
}
|
||||
|
||||
template <typename T>
|
||||
void SimpleValueSortIndex<T>::Add(DocId id, DocumentAccessor* doc, std::string_view field) {
|
||||
bool SimpleValueSortIndex<T>::Add(DocId id, const DocumentAccessor& doc, std::string_view field) {
|
||||
auto field_value = Get(doc, field);
|
||||
if (!field_value) {
|
||||
return false;
|
||||
}
|
||||
|
||||
DCHECK_LE(id, values_.size()); // Doc ids grow at most by one
|
||||
if (id >= values_.size())
|
||||
values_.resize(id + 1);
|
||||
values_[id] = Get(id, doc, field);
|
||||
|
||||
values_[id] = field_value.value();
|
||||
return true;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void SimpleValueSortIndex<T>::Remove(DocId id, DocumentAccessor* doc, std::string_view field) {
|
||||
void SimpleValueSortIndex<T>::Remove(DocId id, const DocumentAccessor& doc,
|
||||
std::string_view field) {
|
||||
DCHECK_LT(id, values_.size());
|
||||
values_[id] = T{};
|
||||
}
|
||||
|
@ -66,23 +74,22 @@ template <typename T> PMR_NS::memory_resource* SimpleValueSortIndex<T>::GetMemRe
|
|||
template struct SimpleValueSortIndex<double>;
|
||||
template struct SimpleValueSortIndex<PMR_NS::string>;
|
||||
|
||||
double NumericSortIndex::Get(DocId id, DocumentAccessor* doc, std::string_view field) {
|
||||
auto str = doc->GetStrings(field);
|
||||
if (str.empty())
|
||||
return 0;
|
||||
|
||||
double v;
|
||||
if (!absl::SimpleAtod(str.front(), &v))
|
||||
return 0;
|
||||
return v;
|
||||
std::optional<double> NumericSortIndex::Get(const DocumentAccessor& doc, std::string_view field) {
|
||||
auto numbers_list = doc.GetNumbers(field);
|
||||
if (!numbers_list) {
|
||||
return std::nullopt;
|
||||
}
|
||||
return !numbers_list->empty() ? numbers_list->front() : 0.0;
|
||||
}
|
||||
|
||||
PMR_NS::string StringSortIndex::Get(DocId id, DocumentAccessor* doc, std::string_view field) {
|
||||
auto str = doc->GetStrings(field);
|
||||
if (str.empty())
|
||||
return "";
|
||||
|
||||
return PMR_NS::string{str.front(), GetMemRes()};
|
||||
std::optional<PMR_NS::string> StringSortIndex::Get(const DocumentAccessor& doc,
|
||||
std::string_view field) {
|
||||
auto strings_list = doc.GetStrings(field);
|
||||
if (!strings_list) {
|
||||
return std::nullopt;
|
||||
}
|
||||
return !strings_list->empty() ? PMR_NS::string{strings_list->front(), GetMemRes()}
|
||||
: PMR_NS::string{GetMemRes()};
|
||||
}
|
||||
|
||||
} // namespace dfly::search
|
||||
|
|
|
@ -24,11 +24,11 @@ template <typename T> struct SimpleValueSortIndex : BaseSortIndex {
|
|||
SortableValue Lookup(DocId doc) const override;
|
||||
std::vector<ResultScore> Sort(std::vector<DocId>* ids, size_t limit, bool desc) const override;
|
||||
|
||||
void Add(DocId id, DocumentAccessor* doc, std::string_view field) override;
|
||||
void Remove(DocId id, DocumentAccessor* doc, std::string_view field) override;
|
||||
bool Add(DocId id, const DocumentAccessor& doc, std::string_view field) override;
|
||||
void Remove(DocId id, const DocumentAccessor& doc, std::string_view field) override;
|
||||
|
||||
protected:
|
||||
virtual T Get(DocId id, DocumentAccessor* doc, std::string_view field) = 0;
|
||||
virtual std::optional<T> Get(const DocumentAccessor& doc, std::string_view field_value) = 0;
|
||||
|
||||
PMR_NS::memory_resource* GetMemRes() const;
|
||||
|
||||
|
@ -39,14 +39,14 @@ template <typename T> struct SimpleValueSortIndex : BaseSortIndex {
|
|||
struct NumericSortIndex : public SimpleValueSortIndex<double> {
|
||||
NumericSortIndex(PMR_NS::memory_resource* mr) : SimpleValueSortIndex{mr} {};
|
||||
|
||||
double Get(DocId id, DocumentAccessor* doc, std::string_view field) override;
|
||||
std::optional<double> Get(const DocumentAccessor& doc, std::string_view field) override;
|
||||
};
|
||||
|
||||
// TODO: Map tags to integers for fast sort
|
||||
struct StringSortIndex : public SimpleValueSortIndex<PMR_NS::string> {
|
||||
StringSortIndex(PMR_NS::memory_resource* mr) : SimpleValueSortIndex{mr} {};
|
||||
|
||||
PMR_NS::string Get(DocId id, DocumentAccessor* doc, std::string_view field) override;
|
||||
std::optional<PMR_NS::string> Get(const DocumentAccessor& doc, std::string_view field) override;
|
||||
};
|
||||
|
||||
} // namespace dfly::search
|
||||
|
|
|
@ -39,18 +39,28 @@ __attribute__((optimize("fast-math"))) float CosineDistance(const float* u, cons
|
|||
return 0.0f;
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
OwnedFtVector BytesToFtVector(string_view value) {
|
||||
DCHECK_EQ(value.size() % sizeof(float), 0u) << value.size();
|
||||
|
||||
OwnedFtVector ConvertToFtVector(string_view value) {
|
||||
// Value cannot be casted directly as it might be not aligned as a float (4 bytes).
|
||||
// Misaligned memory access is UB.
|
||||
size_t size = value.size() / sizeof(float);
|
||||
auto out = make_unique<float[]>(size);
|
||||
memcpy(out.get(), value.data(), size * sizeof(float));
|
||||
|
||||
return {std::move(out), size};
|
||||
return OwnedFtVector{std::move(out), size};
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
OwnedFtVector BytesToFtVector(string_view value) {
|
||||
DCHECK_EQ(value.size() % sizeof(float), 0u) << value.size();
|
||||
return ConvertToFtVector(value);
|
||||
}
|
||||
|
||||
std::optional<OwnedFtVector> BytesToFtVectorSafe(string_view value) {
|
||||
if (value.size() % sizeof(float)) {
|
||||
return std::nullopt;
|
||||
}
|
||||
return ConvertToFtVector(value);
|
||||
}
|
||||
|
||||
float VectorDistance(const float* u, const float* v, size_t dims, VectorSimilarity sim) {
|
||||
|
|
|
@ -10,6 +10,10 @@ namespace dfly::search {
|
|||
|
||||
OwnedFtVector BytesToFtVector(std::string_view value);
|
||||
|
||||
// Returns std::nullopt if value can not be converted to the vector
|
||||
// TODO: Remove unsafe version
|
||||
std::optional<OwnedFtVector> BytesToFtVectorSafe(std::string_view value);
|
||||
|
||||
float VectorDistance(const float* u, const float* v, size_t dims, VectorSimilarity sim);
|
||||
|
||||
} // namespace dfly::search
|
||||
|
|
|
@ -6,35 +6,13 @@
|
|||
|
||||
#include "absl/flags/internal/flag.h"
|
||||
#include "base/flags.h"
|
||||
#include "base/logging.h"
|
||||
#include "facade/dragonfly_connection.h"
|
||||
#include "facade/reply_builder.h"
|
||||
|
||||
ABSL_FLAG(bool, experimental_new_io, true,
|
||||
"Use new replying code - should "
|
||||
"reduce latencies for pipelining");
|
||||
|
||||
namespace facade {
|
||||
|
||||
ConnectionContext::ConnectionContext(::io::Sink* stream, Connection* owner) : owner_(owner) {
|
||||
if (owner) {
|
||||
protocol_ = owner->protocol();
|
||||
}
|
||||
|
||||
if (stream) {
|
||||
switch (protocol_) {
|
||||
case Protocol::REDIS: {
|
||||
RedisReplyBuilder* rb = absl::GetFlag(FLAGS_experimental_new_io)
|
||||
? new RedisReplyBuilder2(stream)
|
||||
: new RedisReplyBuilder(stream);
|
||||
rbuilder_.reset(rb);
|
||||
break;
|
||||
}
|
||||
case Protocol::MEMCACHE:
|
||||
rbuilder_.reset(new MCReplyBuilder(stream));
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
ConnectionContext::ConnectionContext(Connection* owner) : owner_(owner) {
|
||||
conn_closing = false;
|
||||
req_auth = false;
|
||||
replica_conn = false;
|
||||
|
@ -49,7 +27,7 @@ ConnectionContext::ConnectionContext(::io::Sink* stream, Connection* owner) : ow
|
|||
}
|
||||
|
||||
size_t ConnectionContext::UsedMemory() const {
|
||||
return dfly::HeapSize(rbuilder_) + dfly::HeapSize(authed_username) + dfly::HeapSize(acl_commands);
|
||||
return dfly::HeapSize(authed_username) + dfly::HeapSize(acl_commands);
|
||||
}
|
||||
|
||||
} // namespace facade
|
||||
|
|
|
@ -19,7 +19,7 @@ class Connection;
|
|||
|
||||
class ConnectionContext {
|
||||
public:
|
||||
ConnectionContext(::io::Sink* stream, Connection* owner);
|
||||
explicit ConnectionContext(Connection* owner);
|
||||
|
||||
virtual ~ConnectionContext() {
|
||||
}
|
||||
|
@ -32,14 +32,6 @@ class ConnectionContext {
|
|||
return owner_;
|
||||
}
|
||||
|
||||
Protocol protocol() const {
|
||||
return protocol_;
|
||||
}
|
||||
|
||||
SinkReplyBuilder* reply_builder_old() {
|
||||
return rbuilder_.get();
|
||||
}
|
||||
|
||||
virtual size_t UsedMemory() const;
|
||||
|
||||
// connection state / properties.
|
||||
|
@ -71,8 +63,6 @@ class ConnectionContext {
|
|||
|
||||
private:
|
||||
Connection* owner_;
|
||||
Protocol protocol_ = Protocol::REDIS;
|
||||
std::unique_ptr<SinkReplyBuilder> rbuilder_;
|
||||
};
|
||||
|
||||
} // namespace facade
|
||||
|
|
|
@ -479,22 +479,23 @@ void Connection::DispatchOperations::operator()(const PubMessage& pub_msg) {
|
|||
}
|
||||
arr[i++] = pub_msg.channel;
|
||||
arr[i++] = pub_msg.message;
|
||||
rbuilder->SendStringArr(absl::Span<string_view>{arr.data(), i},
|
||||
RedisReplyBuilder::CollectionType::PUSH);
|
||||
rbuilder->SendBulkStrArr(absl::Span<string_view>{arr.data(), i},
|
||||
RedisReplyBuilder::CollectionType::PUSH);
|
||||
}
|
||||
|
||||
void Connection::DispatchOperations::operator()(Connection::PipelineMessage& msg) {
|
||||
DVLOG(2) << "Dispatching pipeline: " << ToSV(msg.args.front());
|
||||
|
||||
self->service_->DispatchCommand(CmdArgList{msg.args.data(), msg.args.size()},
|
||||
self->reply_builder_, self->cc_.get());
|
||||
self->reply_builder_.get(), self->cc_.get());
|
||||
|
||||
self->last_interaction_ = time(nullptr);
|
||||
self->skip_next_squashing_ = false;
|
||||
}
|
||||
|
||||
void Connection::DispatchOperations::operator()(const Connection::MCPipelineMessage& msg) {
|
||||
self->service_->DispatchMC(msg.cmd, msg.value, static_cast<MCReplyBuilder*>(self->reply_builder_),
|
||||
self->service_->DispatchMC(msg.cmd, msg.value,
|
||||
static_cast<MCReplyBuilder*>(self->reply_builder_.get()),
|
||||
self->cc_.get());
|
||||
self->last_interaction_ = time(nullptr);
|
||||
}
|
||||
|
@ -518,7 +519,7 @@ void Connection::DispatchOperations::operator()(const InvalidationMessage& msg)
|
|||
rbuilder->SendNull();
|
||||
} else {
|
||||
std::string_view keys[] = {msg.key};
|
||||
rbuilder->SendStringArr(keys);
|
||||
rbuilder->SendBulkStrArr(keys);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -538,14 +539,13 @@ void UpdateLibNameVerMap(const string& name, const string& ver, int delta) {
|
|||
Connection::Connection(Protocol protocol, util::HttpListenerBase* http_listener, SSL_CTX* ctx,
|
||||
ServiceInterface* service)
|
||||
: io_buf_(kMinReadSize),
|
||||
protocol_(protocol),
|
||||
http_listener_(http_listener),
|
||||
ssl_ctx_(ctx),
|
||||
service_(service),
|
||||
flags_(0) {
|
||||
static atomic_uint32_t next_id{1};
|
||||
|
||||
protocol_ = protocol;
|
||||
|
||||
constexpr size_t kReqSz = sizeof(Connection::PipelineMessage);
|
||||
static_assert(kReqSz <= 256 && kReqSz >= 200);
|
||||
|
||||
|
@ -724,8 +724,7 @@ void Connection::HandleRequests() {
|
|||
// because both Write and Recv internally check if the socket was shut
|
||||
// down and return with an error accordingly.
|
||||
if (http_res && socket_->IsOpen()) {
|
||||
cc_.reset(service_->CreateContext(socket_.get(), this));
|
||||
reply_builder_ = cc_->reply_builder_old();
|
||||
cc_.reset(service_->CreateContext(this));
|
||||
|
||||
if (*http_res) {
|
||||
VLOG(1) << "HTTP1.1 identified";
|
||||
|
@ -745,19 +744,28 @@ void Connection::HandleRequests() {
|
|||
// Release the ownership of the socket from http_conn so it would stay with
|
||||
// this connection.
|
||||
http_conn.ReleaseSocket();
|
||||
} else {
|
||||
} else { // non-http
|
||||
if (breaker_cb_) {
|
||||
socket_->RegisterOnErrorCb([this](int32_t mask) { this->OnBreakCb(mask); });
|
||||
}
|
||||
|
||||
switch (protocol_) {
|
||||
case Protocol::REDIS:
|
||||
reply_builder_.reset(new RedisReplyBuilder(socket_.get()));
|
||||
break;
|
||||
case Protocol::MEMCACHE:
|
||||
reply_builder_.reset(new MCReplyBuilder(socket_.get()));
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
ConnectionFlow();
|
||||
|
||||
socket_->CancelOnErrorCb(); // noop if nothing is registered.
|
||||
VLOG(1) << "Closed connection for peer "
|
||||
<< GetClientInfo(fb2::ProactorBase::me()->GetPoolIndex());
|
||||
reply_builder_.reset();
|
||||
}
|
||||
VLOG(1) << "Closed connection for peer "
|
||||
<< GetClientInfo(fb2::ProactorBase::me()->GetPoolIndex());
|
||||
cc_.reset();
|
||||
reply_builder_ = nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -929,6 +937,8 @@ io::Result<bool> Connection::CheckForHttpProto() {
|
|||
}
|
||||
|
||||
void Connection::ConnectionFlow() {
|
||||
DCHECK(reply_builder_);
|
||||
|
||||
++stats_->num_conns;
|
||||
++stats_->conn_received_cnt;
|
||||
stats_->read_buf_capacity += io_buf_.Capacity();
|
||||
|
@ -986,7 +996,7 @@ void Connection::ConnectionFlow() {
|
|||
VLOG(1) << "Error parser status " << parser_error_;
|
||||
|
||||
if (redis_parser_) {
|
||||
SendProtocolError(RedisParser::Result(parser_error_), reply_builder_);
|
||||
SendProtocolError(RedisParser::Result(parser_error_), reply_builder_.get());
|
||||
} else {
|
||||
DCHECK(memcache_parser_);
|
||||
reply_builder_->SendProtocolError("bad command line format");
|
||||
|
@ -1089,7 +1099,7 @@ Connection::ParserStatus Connection::ParseRedis() {
|
|||
|
||||
auto dispatch_sync = [this, &parse_args, &cmd_vec] {
|
||||
RespExpr::VecToArgList(parse_args, &cmd_vec);
|
||||
service_->DispatchCommand(absl::MakeSpan(cmd_vec), reply_builder_, cc_.get());
|
||||
service_->DispatchCommand(absl::MakeSpan(cmd_vec), reply_builder_.get(), cc_.get());
|
||||
};
|
||||
auto dispatch_async = [this, &parse_args, tlh = mi_heap_get_backing()]() -> MessageHandle {
|
||||
return {FromArgs(std::move(parse_args), tlh)};
|
||||
|
@ -1134,14 +1144,14 @@ auto Connection::ParseMemcache() -> ParserStatus {
|
|||
string_view value;
|
||||
|
||||
auto dispatch_sync = [this, &cmd, &value] {
|
||||
service_->DispatchMC(cmd, value, static_cast<MCReplyBuilder*>(reply_builder_), cc_.get());
|
||||
service_->DispatchMC(cmd, value, static_cast<MCReplyBuilder*>(reply_builder_.get()), cc_.get());
|
||||
};
|
||||
|
||||
auto dispatch_async = [&cmd, &value]() -> MessageHandle {
|
||||
return {make_unique<MCPipelineMessage>(std::move(cmd), value)};
|
||||
};
|
||||
|
||||
MCReplyBuilder* builder = static_cast<MCReplyBuilder*>(reply_builder_);
|
||||
MCReplyBuilder* builder = static_cast<MCReplyBuilder*>(reply_builder_.get());
|
||||
|
||||
do {
|
||||
string_view str = ToSV(io_buf_.InputBuffer());
|
||||
|
@ -1358,7 +1368,7 @@ bool Connection::ShouldEndDispatchFiber(const MessageHandle& msg) {
|
|||
|
||||
void Connection::SquashPipeline() {
|
||||
DCHECK_EQ(dispatch_q_.size(), pending_pipeline_cmd_cnt_);
|
||||
DCHECK_EQ(reply_builder_->type(), SinkReplyBuilder::REDIS); // Only Redis is supported.
|
||||
DCHECK_EQ(reply_builder_->GetProtocol(), Protocol::REDIS); // Only Redis is supported.
|
||||
|
||||
vector<ArgSlice> squash_cmds;
|
||||
squash_cmds.reserve(dispatch_q_.size());
|
||||
|
@ -1374,10 +1384,10 @@ void Connection::SquashPipeline() {
|
|||
cc_->async_dispatch = true;
|
||||
|
||||
size_t dispatched =
|
||||
service_->DispatchManyCommands(absl::MakeSpan(squash_cmds), reply_builder_, cc_.get());
|
||||
service_->DispatchManyCommands(absl::MakeSpan(squash_cmds), reply_builder_.get(), cc_.get());
|
||||
|
||||
if (pending_pipeline_cmd_cnt_ == squash_cmds.size()) { // Flush if no new commands appeared
|
||||
reply_builder_->FlushBatch();
|
||||
reply_builder_->Flush();
|
||||
reply_builder_->SetBatchMode(false); // in case the next dispatch is sync
|
||||
}
|
||||
|
||||
|
@ -1397,7 +1407,7 @@ void Connection::SquashPipeline() {
|
|||
}
|
||||
|
||||
void Connection::ClearPipelinedMessages() {
|
||||
DispatchOperations dispatch_op{reply_builder_, this};
|
||||
DispatchOperations dispatch_op{reply_builder_.get(), this};
|
||||
|
||||
// Recycle messages even from disconnecting client to keep properly track of memory stats
|
||||
// As well as to avoid pubsub backpressure leakege.
|
||||
|
@ -1445,7 +1455,7 @@ std::string Connection::DebugInfo() const {
|
|||
void Connection::ExecutionFiber() {
|
||||
ThisFiber::SetName("ExecutionFiber");
|
||||
|
||||
DispatchOperations dispatch_op{reply_builder_, this};
|
||||
DispatchOperations dispatch_op{reply_builder_.get(), this};
|
||||
|
||||
size_t squashing_threshold = GetFlag(FLAGS_pipeline_squash);
|
||||
|
||||
|
@ -1498,7 +1508,7 @@ void Connection::ExecutionFiber() {
|
|||
// last command to reply and flush. If it doesn't reply (i.e. is a control message like
|
||||
// migrate), we have to flush manually.
|
||||
if (dispatch_q_.empty() && !msg.IsReplying()) {
|
||||
reply_builder_->FlushBatch();
|
||||
reply_builder_->Flush();
|
||||
}
|
||||
|
||||
if (ShouldEndDispatchFiber(msg)) {
|
||||
|
@ -1809,7 +1819,7 @@ Connection::MemoryUsage Connection::GetMemoryUsage() const {
|
|||
size_t mem = sizeof(*this) + dfly::HeapSize(dispatch_q_) + dfly::HeapSize(name_) +
|
||||
dfly::HeapSize(tmp_parse_args_) + dfly::HeapSize(tmp_cmd_vec_) +
|
||||
dfly::HeapSize(memcache_parser_) + dfly::HeapSize(redis_parser_) +
|
||||
dfly::HeapSize(cc_);
|
||||
dfly::HeapSize(cc_) + dfly::HeapSize(reply_builder_);
|
||||
|
||||
// We add a hardcoded 9k value to accomodate for the part of the Fiber stack that is in use.
|
||||
// The allocated stack is actually larger (~130k), but only a small fraction of that (9k
|
||||
|
|
|
@ -269,10 +269,6 @@ class Connection : public util::Connection {
|
|||
|
||||
bool IsMain() const;
|
||||
|
||||
Protocol protocol() const {
|
||||
return protocol_;
|
||||
}
|
||||
|
||||
void SetName(std::string name);
|
||||
|
||||
void SetLibName(std::string name);
|
||||
|
@ -404,9 +400,7 @@ class Connection : public util::Connection {
|
|||
Protocol protocol_;
|
||||
ConnectionStats* stats_ = nullptr;
|
||||
|
||||
// cc_->reply_builder may change during the lifetime of the connection, due to injections.
|
||||
// This is a pointer to the original, socket based reply builder that never changes.
|
||||
SinkReplyBuilder* reply_builder_ = nullptr;
|
||||
std::unique_ptr<SinkReplyBuilder> reply_builder_;
|
||||
util::HttpListenerBase* http_listener_;
|
||||
SSL_CTX* ssl_ctx_;
|
||||
|
||||
|
|
|
@ -50,6 +50,10 @@ class Listener : public util::ListenerInterface {
|
|||
bool IsPrivilegedInterface() const;
|
||||
bool IsMainInterface() const;
|
||||
|
||||
Protocol protocol() const {
|
||||
return protocol_;
|
||||
}
|
||||
|
||||
private:
|
||||
util::Connection* NewConnection(ProactorBase* proactor) final;
|
||||
ProactorBase* PickConnectionProactor(util::FiberSocketBase* sock) final;
|
||||
|
|
|
@ -209,4 +209,17 @@ ostream& operator<<(ostream& os, facade::RespSpan ras) {
|
|||
return os;
|
||||
}
|
||||
|
||||
ostream& operator<<(ostream& os, facade::Protocol p) {
|
||||
switch (p) {
|
||||
case facade::Protocol::REDIS:
|
||||
os << "REDIS";
|
||||
break;
|
||||
case facade::Protocol::MEMCACHE:
|
||||
os << "MEMCACHE";
|
||||
break;
|
||||
}
|
||||
|
||||
return os;
|
||||
}
|
||||
|
||||
} // namespace std
|
||||
|
|
|
@ -189,5 +189,6 @@ void ResetStats();
|
|||
|
||||
namespace std {
|
||||
ostream& operator<<(ostream& os, facade::CmdArgList args);
|
||||
ostream& operator<<(ostream& os, facade::Protocol protocol);
|
||||
|
||||
} // namespace std
|
||||
|
|
|
@ -37,8 +37,8 @@ class OkService : public ServiceInterface {
|
|||
builder->SendError("");
|
||||
}
|
||||
|
||||
ConnectionContext* CreateContext(util::FiberSocketBase* peer, Connection* owner) final {
|
||||
return new ConnectionContext{peer, owner};
|
||||
ConnectionContext* CreateContext(Connection* owner) final {
|
||||
return new ConnectionContext{owner};
|
||||
}
|
||||
};
|
||||
|
||||
|
|
|
@ -49,10 +49,6 @@ constexpr unsigned kConvFlags =
|
|||
|
||||
DoubleToStringConverter dfly_conv(kConvFlags, "inf", "nan", 'e', -6, 21, 6, 0);
|
||||
|
||||
const char* NullString(bool resp3) {
|
||||
return resp3 ? "_\r\n" : "$-1\r\n";
|
||||
}
|
||||
|
||||
template <typename T> size_t piece_size(const T& v) {
|
||||
if constexpr (is_array_v<T>)
|
||||
return ABSL_ARRAYSIZE(v) - 1; // expect null terminated
|
||||
|
@ -77,21 +73,28 @@ char* write_piece(string_view str, char* dest) {
|
|||
|
||||
} // namespace
|
||||
|
||||
SinkReplyBuilder::MGetResponse::~MGetResponse() {
|
||||
while (storage_list) {
|
||||
auto* next = storage_list->next;
|
||||
delete[] reinterpret_cast<char*>(storage_list);
|
||||
storage_list = next;
|
||||
}
|
||||
SinkReplyBuilder::ReplyAggregator::~ReplyAggregator() {
|
||||
rb->batched_ = prev;
|
||||
if (!prev)
|
||||
rb->Flush();
|
||||
}
|
||||
|
||||
SinkReplyBuilder::SinkReplyBuilder(::io::Sink* sink, Type t)
|
||||
: sink_(sink),
|
||||
should_batch_(false),
|
||||
should_aggregate_(false),
|
||||
has_replied_(true),
|
||||
send_active_(false),
|
||||
type_(t) {
|
||||
SinkReplyBuilder::ReplyScope::~ReplyScope() {
|
||||
rb->scoped_ = prev;
|
||||
if (!prev)
|
||||
rb->FinishScope();
|
||||
}
|
||||
|
||||
void SinkReplyBuilder::SendError(ErrorReply error) {
|
||||
if (error.status)
|
||||
return SendError(*error.status);
|
||||
SendError(error.ToSv(), error.kind);
|
||||
}
|
||||
|
||||
void SinkReplyBuilder::SendError(OpStatus status) {
|
||||
if (status == OpStatus::OK)
|
||||
return SendSimpleString("OK");
|
||||
SendError(StatusToMsg(status));
|
||||
}
|
||||
|
||||
void SinkReplyBuilder::CloseConnection() {
|
||||
|
@ -99,157 +102,7 @@ void SinkReplyBuilder::CloseConnection() {
|
|||
ec_ = std::make_error_code(std::errc::connection_aborted);
|
||||
}
|
||||
|
||||
void SinkReplyBuilder::Send(const iovec* v, uint32_t len) {
|
||||
has_replied_ = true;
|
||||
DCHECK(sink_);
|
||||
constexpr size_t kMaxBatchSize = 1024;
|
||||
|
||||
size_t bsize = 0;
|
||||
for (unsigned i = 0; i < len; ++i) {
|
||||
bsize += v[i].iov_len;
|
||||
}
|
||||
|
||||
// Allow batching with up to kMaxBatchSize of data.
|
||||
if ((should_batch_ || should_aggregate_) && (batch_.size() + bsize < kMaxBatchSize)) {
|
||||
batch_.reserve(batch_.size() + bsize);
|
||||
for (unsigned i = 0; i < len; ++i) {
|
||||
std::string_view src((char*)v[i].iov_base, v[i].iov_len);
|
||||
DVLOG(3) << "Appending to stream " << absl::CHexEscape(src);
|
||||
batch_.append(src.data(), src.size());
|
||||
}
|
||||
DVLOG(2) << "Batched " << bsize << " bytes";
|
||||
return;
|
||||
}
|
||||
|
||||
int64_t before_ns = util::fb2::ProactorBase::GetMonotonicTimeNs();
|
||||
error_code ec;
|
||||
send_active_ = true;
|
||||
tl_facade_stats->reply_stats.io_write_cnt++;
|
||||
tl_facade_stats->reply_stats.io_write_bytes += bsize;
|
||||
DVLOG(2) << "Writing " << bsize + batch_.size() << " bytes of len " << len;
|
||||
|
||||
if (batch_.empty()) {
|
||||
ec = sink_->Write(v, len);
|
||||
} else {
|
||||
DVLOG(3) << "Sending batch to stream :" << absl::CHexEscape(batch_);
|
||||
|
||||
tl_facade_stats->reply_stats.io_write_bytes += batch_.size();
|
||||
if (len == UIO_MAXIOV) {
|
||||
ec = sink_->Write(io::Buffer(batch_));
|
||||
if (!ec) {
|
||||
ec = sink_->Write(v, len);
|
||||
}
|
||||
} else {
|
||||
iovec tmp[len + 1];
|
||||
tmp[0].iov_base = batch_.data();
|
||||
tmp[0].iov_len = batch_.size();
|
||||
copy(v, v + len, tmp + 1);
|
||||
ec = sink_->Write(tmp, len + 1);
|
||||
}
|
||||
batch_.clear();
|
||||
}
|
||||
send_active_ = false;
|
||||
int64_t after_ns = util::fb2::ProactorBase::GetMonotonicTimeNs();
|
||||
tl_facade_stats->reply_stats.send_stats.count++;
|
||||
tl_facade_stats->reply_stats.send_stats.total_duration += (after_ns - before_ns) / 1'000;
|
||||
|
||||
if (ec) {
|
||||
DVLOG(1) << "Error writing to stream: " << ec.message();
|
||||
ec_ = ec;
|
||||
}
|
||||
}
|
||||
|
||||
void SinkReplyBuilder::SendRaw(std::string_view raw) {
|
||||
iovec v = {IoVec(raw)};
|
||||
|
||||
Send(&v, 1);
|
||||
}
|
||||
|
||||
void SinkReplyBuilder::ExpectReply() {
|
||||
has_replied_ = false;
|
||||
}
|
||||
|
||||
void SinkReplyBuilder::SendError(ErrorReply error) {
|
||||
if (error.status)
|
||||
return SendError(*error.status);
|
||||
|
||||
SendError(error.ToSv(), error.kind);
|
||||
}
|
||||
|
||||
void SinkReplyBuilder::SendError(OpStatus status) {
|
||||
if (status == OpStatus::OK) {
|
||||
SendOk();
|
||||
} else {
|
||||
SendError(StatusToMsg(status));
|
||||
}
|
||||
}
|
||||
|
||||
void SinkReplyBuilder::StartAggregate() {
|
||||
DVLOG(1) << "StartAggregate";
|
||||
should_aggregate_ = true;
|
||||
}
|
||||
|
||||
void SinkReplyBuilder::StopAggregate() {
|
||||
DVLOG(1) << "StopAggregate";
|
||||
should_aggregate_ = false;
|
||||
|
||||
if (should_batch_)
|
||||
return;
|
||||
|
||||
FlushBatch();
|
||||
}
|
||||
|
||||
void SinkReplyBuilder::SetBatchMode(bool batch) {
|
||||
DVLOG(1) << "SetBatchMode(" << (batch ? "true" : "false") << ")";
|
||||
should_batch_ = batch;
|
||||
}
|
||||
|
||||
void SinkReplyBuilder::FlushBatch() {
|
||||
if (batch_.empty())
|
||||
return;
|
||||
|
||||
error_code ec = sink_->Write(io::Buffer(batch_));
|
||||
batch_.clear();
|
||||
if (ec) {
|
||||
DVLOG(1) << "Error flushing to stream: " << ec.message();
|
||||
ec_ = ec;
|
||||
}
|
||||
}
|
||||
|
||||
size_t SinkReplyBuilder::UsedMemory() const {
|
||||
return dfly::HeapSize(batch_);
|
||||
}
|
||||
|
||||
SinkReplyBuilder2::ReplyAggregator::~ReplyAggregator() {
|
||||
rb->batched_ = prev;
|
||||
if (!prev)
|
||||
rb->Flush();
|
||||
}
|
||||
|
||||
SinkReplyBuilder2::ReplyScope::~ReplyScope() {
|
||||
rb->scoped_ = prev;
|
||||
if (!prev)
|
||||
rb->FinishScope();
|
||||
}
|
||||
|
||||
void SinkReplyBuilder2::SendError(ErrorReply error) {
|
||||
if (error.status)
|
||||
return SendError(*error.status);
|
||||
SendError(error.ToSv(), error.kind);
|
||||
}
|
||||
|
||||
void SinkReplyBuilder2::SendError(OpStatus status) {
|
||||
if (status == OpStatus::OK)
|
||||
return SendSimpleString("OK");
|
||||
SendError(StatusToMsg(status));
|
||||
}
|
||||
|
||||
void SinkReplyBuilder2::CloseConnection() {
|
||||
if (!ec_)
|
||||
ec_ = std::make_error_code(std::errc::connection_aborted);
|
||||
}
|
||||
|
||||
template <typename... Ts> void SinkReplyBuilder2::WritePieces(Ts&&... pieces) {
|
||||
template <typename... Ts> void SinkReplyBuilder::WritePieces(Ts&&... pieces) {
|
||||
if (size_t required = (piece_size(pieces) + ...); buffer_.AppendLen() <= required)
|
||||
Flush(required);
|
||||
|
||||
|
@ -268,12 +121,12 @@ template <typename... Ts> void SinkReplyBuilder2::WritePieces(Ts&&... pieces) {
|
|||
total_size_ += written;
|
||||
}
|
||||
|
||||
void SinkReplyBuilder2::WriteRef(std::string_view str) {
|
||||
void SinkReplyBuilder::WriteRef(std::string_view str) {
|
||||
NextVec(str);
|
||||
total_size_ += str.size();
|
||||
}
|
||||
|
||||
void SinkReplyBuilder2::Flush(size_t expected_buffer_cap) {
|
||||
void SinkReplyBuilder::Flush(size_t expected_buffer_cap) {
|
||||
Send();
|
||||
|
||||
// Grow backing buffer if was at least half full and still below it's max size
|
||||
|
@ -290,7 +143,7 @@ void SinkReplyBuilder2::Flush(size_t expected_buffer_cap) {
|
|||
buffer_.Reserve(expected_buffer_cap);
|
||||
}
|
||||
|
||||
void SinkReplyBuilder2::Send() {
|
||||
void SinkReplyBuilder::Send() {
|
||||
auto& reply_stats = tl_facade_stats->reply_stats;
|
||||
|
||||
send_active_ = true;
|
||||
|
@ -308,7 +161,9 @@ void SinkReplyBuilder2::Send() {
|
|||
send_active_ = false;
|
||||
}
|
||||
|
||||
void SinkReplyBuilder2::FinishScope() {
|
||||
void SinkReplyBuilder::FinishScope() {
|
||||
replies_recorded_++;
|
||||
|
||||
if (!batched_ || total_size_ * 2 >= kMaxBufferSize)
|
||||
return Flush();
|
||||
|
||||
|
@ -332,22 +187,37 @@ void SinkReplyBuilder2::FinishScope() {
|
|||
guaranteed_pieces_ = vecs_.size(); // all vecs are pieces
|
||||
}
|
||||
|
||||
void SinkReplyBuilder2::NextVec(std::string_view str) {
|
||||
void SinkReplyBuilder::NextVec(std::string_view str) {
|
||||
if (vecs_.size() >= IOV_MAX - 2)
|
||||
Flush();
|
||||
vecs_.push_back(iovec{const_cast<char*>(str.data()), str.size()});
|
||||
}
|
||||
|
||||
MCReplyBuilder::MCReplyBuilder(::io::Sink* sink) : SinkReplyBuilder(sink, MC), noreply_(false) {
|
||||
MCReplyBuilder::MCReplyBuilder(::io::Sink* sink) : SinkReplyBuilder(sink), noreply_(false) {
|
||||
}
|
||||
|
||||
void MCReplyBuilder::SendValue(std::string_view key, std::string_view value, uint64_t mc_ver,
|
||||
uint32_t mc_flag) {
|
||||
ReplyScope scope(this);
|
||||
WritePieces("VALUE ", key, " ", mc_flag, " ", value.size());
|
||||
if (mc_ver)
|
||||
WritePieces(" ", mc_ver);
|
||||
|
||||
if (value.size() <= kMaxInlineSize) {
|
||||
WritePieces(kCRLF, value, kCRLF);
|
||||
} else {
|
||||
WritePieces(kCRLF);
|
||||
WriteRef(value);
|
||||
WritePieces(kCRLF);
|
||||
}
|
||||
}
|
||||
|
||||
void MCReplyBuilder::SendSimpleString(std::string_view str) {
|
||||
if (noreply_)
|
||||
return;
|
||||
|
||||
iovec v[2] = {IoVec(str), IoVec(kCRLF)};
|
||||
|
||||
Send(v, ABSL_ARRAYSIZE(v));
|
||||
ReplyScope scope(this);
|
||||
WritePieces(str, kCRLF);
|
||||
}
|
||||
|
||||
void MCReplyBuilder::SendStored() {
|
||||
|
@ -355,31 +225,11 @@ void MCReplyBuilder::SendStored() {
|
|||
}
|
||||
|
||||
void MCReplyBuilder::SendLong(long val) {
|
||||
char buf[32];
|
||||
char* next = absl::numbers_internal::FastIntToBuffer(val, buf);
|
||||
SendSimpleString(string_view(buf, next - buf));
|
||||
}
|
||||
|
||||
void MCReplyBuilder::SendMGetResponse(MGetResponse resp) {
|
||||
string header;
|
||||
for (unsigned i = 0; i < resp.resp_arr.size(); ++i) {
|
||||
if (resp.resp_arr[i]) {
|
||||
const auto& src = *resp.resp_arr[i];
|
||||
absl::StrAppend(&header, "VALUE ", src.key, " ", src.mc_flag, " ", src.value.size());
|
||||
if (src.mc_ver) {
|
||||
absl::StrAppend(&header, " ", src.mc_ver);
|
||||
}
|
||||
|
||||
absl::StrAppend(&header, "\r\n");
|
||||
iovec v[] = {IoVec(header), IoVec(src.value), IoVec(kCRLF)};
|
||||
Send(v, ABSL_ARRAYSIZE(v));
|
||||
header.clear();
|
||||
}
|
||||
}
|
||||
SendSimpleString("END");
|
||||
SendSimpleString(absl::StrCat(val));
|
||||
}
|
||||
|
||||
void MCReplyBuilder::SendError(string_view str, std::string_view type) {
|
||||
last_error_ = str;
|
||||
SendSimpleString(absl::StrCat("SERVER_ERROR ", str));
|
||||
}
|
||||
|
||||
|
@ -387,13 +237,8 @@ void MCReplyBuilder::SendProtocolError(std::string_view str) {
|
|||
SendSimpleString(absl::StrCat("CLIENT_ERROR ", str));
|
||||
}
|
||||
|
||||
bool MCReplyBuilder::NoReply() const {
|
||||
return noreply_;
|
||||
}
|
||||
|
||||
void MCReplyBuilder::SendClientError(string_view str) {
|
||||
iovec v[] = {IoVec("CLIENT_ERROR "), IoVec(str), IoVec(kCRLF)};
|
||||
Send(v, ABSL_ARRAYSIZE(v));
|
||||
SendSimpleString(absl::StrCat("CLIENT_ERROR ", str));
|
||||
}
|
||||
|
||||
void MCReplyBuilder::SendSetSkipped() {
|
||||
|
@ -404,455 +249,18 @@ void MCReplyBuilder::SendNotFound() {
|
|||
SendSimpleString("NOT_FOUND");
|
||||
}
|
||||
|
||||
MCReplyBuilder2::MCReplyBuilder2(::io::Sink* sink) : SinkReplyBuilder2(sink), noreply_(false) {
|
||||
}
|
||||
|
||||
void MCReplyBuilder2::SendValue(std::string_view key, std::string_view value, uint64_t mc_ver,
|
||||
uint32_t mc_flag) {
|
||||
void MCReplyBuilder::SendRaw(std::string_view str) {
|
||||
ReplyScope scope(this);
|
||||
WritePieces("VALUE ", key, " ", mc_flag, " ", value.size());
|
||||
if (mc_ver)
|
||||
WritePieces(" ", mc_ver);
|
||||
|
||||
if (value.size() <= kMaxInlineSize) {
|
||||
WritePieces(value, kCRLF);
|
||||
} else {
|
||||
WriteRef(value);
|
||||
WritePieces(kCRLF);
|
||||
}
|
||||
WriteRef(str);
|
||||
}
|
||||
|
||||
void MCReplyBuilder2::SendSimpleString(std::string_view str) {
|
||||
if (noreply_)
|
||||
return;
|
||||
|
||||
void RedisReplyBuilderBase::SendNull() {
|
||||
ReplyScope scope(this);
|
||||
WritePieces(str, kCRLF);
|
||||
}
|
||||
|
||||
void MCReplyBuilder2::SendStored() {
|
||||
SendSimpleString("STORED");
|
||||
}
|
||||
|
||||
void MCReplyBuilder2::SendLong(long val) {
|
||||
SendSimpleString(absl::StrCat(val));
|
||||
}
|
||||
|
||||
void MCReplyBuilder2::SendError(string_view str, std::string_view type) {
|
||||
SendSimpleString(absl::StrCat("SERVER_ERROR ", str));
|
||||
}
|
||||
|
||||
void MCReplyBuilder2::SendProtocolError(std::string_view str) {
|
||||
SendSimpleString(absl::StrCat("CLIENT_ERROR ", str));
|
||||
}
|
||||
|
||||
void MCReplyBuilder2::SendClientError(string_view str) {
|
||||
SendSimpleString(absl::StrCat("CLIENT_ERROR", str));
|
||||
}
|
||||
|
||||
void MCReplyBuilder2::SendSetSkipped() {
|
||||
SendSimpleString("NOT_STORED");
|
||||
}
|
||||
|
||||
void MCReplyBuilder2::SendNotFound() {
|
||||
SendSimpleString("NOT_FOUND");
|
||||
}
|
||||
|
||||
char* RedisReplyBuilder::FormatDouble(double val, char* dest, unsigned dest_len) {
|
||||
StringBuilder sb(dest, dest_len);
|
||||
CHECK(dfly_conv.ToShortest(val, &sb));
|
||||
return sb.Finalize();
|
||||
}
|
||||
|
||||
RedisReplyBuilder::RedisReplyBuilder(::io::Sink* sink) : SinkReplyBuilder(sink, REDIS) {
|
||||
}
|
||||
|
||||
void RedisReplyBuilder::SetResp3(bool is_resp3) {
|
||||
is_resp3_ = is_resp3;
|
||||
}
|
||||
|
||||
void RedisReplyBuilder::SendError(string_view str, string_view err_type) {
|
||||
VLOG(1) << "Error: " << str;
|
||||
|
||||
if (err_type.empty()) {
|
||||
err_type = str;
|
||||
if (err_type == kSyntaxErr)
|
||||
err_type = kSyntaxErrType;
|
||||
else if (err_type == kWrongTypeErr)
|
||||
err_type = kWrongTypeErrType;
|
||||
else if (err_type == kScriptNotFound)
|
||||
err_type = kScriptErrType;
|
||||
}
|
||||
|
||||
tl_facade_stats->reply_stats.err_count[err_type]++;
|
||||
|
||||
if (str[0] == '-') {
|
||||
iovec v[] = {IoVec(str), IoVec(kCRLF)};
|
||||
Send(v, ABSL_ARRAYSIZE(v));
|
||||
return;
|
||||
}
|
||||
|
||||
iovec v[] = {IoVec(kErrPref), IoVec(str), IoVec(kCRLF)};
|
||||
Send(v, ABSL_ARRAYSIZE(v));
|
||||
}
|
||||
|
||||
void RedisReplyBuilder::SendProtocolError(std::string_view str) {
|
||||
SendError(absl::StrCat("-ERR Protocol error: ", str), "protocol_error");
|
||||
}
|
||||
|
||||
void RedisReplyBuilder::SendSimpleString(std::string_view str) {
|
||||
iovec v[3] = {IoVec(kSimplePref), IoVec(str), IoVec(kCRLF)};
|
||||
|
||||
Send(v, ABSL_ARRAYSIZE(v));
|
||||
}
|
||||
|
||||
void RedisReplyBuilder::SendStored() {
|
||||
SendSimpleString("OK");
|
||||
}
|
||||
|
||||
void RedisReplyBuilder::SendSetSkipped() {
|
||||
SendNull();
|
||||
}
|
||||
|
||||
void RedisReplyBuilder::SendNull() {
|
||||
iovec v[] = {IoVec(NullString(is_resp3_))};
|
||||
|
||||
Send(v, ABSL_ARRAYSIZE(v));
|
||||
}
|
||||
|
||||
void RedisReplyBuilder::SendBulkString(std::string_view str) {
|
||||
char tmp[absl::numbers_internal::kFastToBufferSize + 3];
|
||||
tmp[0] = '$'; // Format length
|
||||
char* next = absl::numbers_internal::FastIntToBuffer(uint32_t(str.size()), tmp + 1);
|
||||
*next++ = '\r';
|
||||
*next++ = '\n';
|
||||
|
||||
std::string_view lenpref{tmp, size_t(next - tmp)};
|
||||
|
||||
// 3 parts: length, string and CRLF.
|
||||
iovec v[3] = {IoVec(lenpref), IoVec(str), IoVec(kCRLF)};
|
||||
|
||||
return Send(v, ABSL_ARRAYSIZE(v));
|
||||
}
|
||||
|
||||
void RedisReplyBuilder::SendVerbatimString(std::string_view str, VerbatimFormat format) {
|
||||
if (!is_resp3_)
|
||||
return SendBulkString(str);
|
||||
|
||||
char tmp[absl::numbers_internal::kFastToBufferSize + 7];
|
||||
tmp[0] = '=';
|
||||
// + 4 because format is three byte, and need to be followed by a ":"
|
||||
char* next = absl::numbers_internal::FastIntToBuffer(uint32_t(str.size() + 4), tmp + 1);
|
||||
*next++ = '\r';
|
||||
*next++ = '\n';
|
||||
|
||||
DCHECK(format <= VerbatimFormat::MARKDOWN);
|
||||
if (format == VerbatimFormat::TXT)
|
||||
strcpy(next, "txt:");
|
||||
else if (format == VerbatimFormat::MARKDOWN)
|
||||
strcpy(next, "mkd:");
|
||||
next += 4;
|
||||
std::string_view lenpref{tmp, size_t(next - tmp)};
|
||||
iovec v[3] = {IoVec(lenpref), IoVec(str), IoVec(kCRLF)};
|
||||
return Send(v, ABSL_ARRAYSIZE(v));
|
||||
}
|
||||
|
||||
void RedisReplyBuilder::SendLong(long num) {
|
||||
string str = absl::StrCat(":", num, kCRLF);
|
||||
SendRaw(str);
|
||||
}
|
||||
|
||||
void RedisReplyBuilder::SendScoredArray(absl::Span<const std::pair<std::string, double>> arr,
|
||||
bool with_scores) {
|
||||
ReplyAggregator agg(this);
|
||||
if (!with_scores) {
|
||||
auto cb = [&](size_t indx) -> string_view { return arr[indx].first; };
|
||||
|
||||
SendStringArrInternal(arr.size(), std::move(cb), CollectionType::ARRAY);
|
||||
return;
|
||||
}
|
||||
|
||||
char buf[DoubleToStringConverter::kBase10MaximalLength * 3]; // to be on the safe side.
|
||||
|
||||
if (!is_resp3_) { // RESP2 formats withscores as a flat array.
|
||||
auto cb = [&](size_t indx) -> string_view {
|
||||
if (indx % 2 == 0)
|
||||
return arr[indx / 2].first;
|
||||
|
||||
// NOTE: we reuse the same buffer, assuming that SendStringArrInternal does not reference
|
||||
// previous string_views. The assumption holds for small strings like
|
||||
// doubles because SendStringArrInternal employs small string optimization.
|
||||
// It's a bit hacky but saves allocations.
|
||||
return FormatDouble(arr[indx / 2].second, buf, sizeof(buf));
|
||||
};
|
||||
|
||||
SendStringArrInternal(arr.size() * 2, std::move(cb), CollectionType::ARRAY);
|
||||
return;
|
||||
}
|
||||
|
||||
// Resp3 formats withscores as array of (key, score) pairs.
|
||||
// TODO: to implement efficient serializing by extending SendStringArrInternal to support
|
||||
// 2-level arrays.
|
||||
StartArray(arr.size());
|
||||
for (const auto& p : arr) {
|
||||
StartArray(2);
|
||||
SendBulkString(p.first);
|
||||
SendDouble(p.second);
|
||||
}
|
||||
}
|
||||
|
||||
void RedisReplyBuilder::SendDouble(double val) {
|
||||
char buf[64];
|
||||
|
||||
char* start = FormatDouble(val, buf, sizeof(buf));
|
||||
|
||||
if (!is_resp3_) {
|
||||
SendBulkString(start);
|
||||
} else {
|
||||
// RESP3
|
||||
SendRaw(absl::StrCat(",", start, kCRLF));
|
||||
}
|
||||
}
|
||||
|
||||
void RedisReplyBuilder::SendMGetResponse(MGetResponse resp) {
|
||||
DCHECK(!resp.resp_arr.empty());
|
||||
|
||||
size_t size = resp.resp_arr.size();
|
||||
|
||||
size_t vec_len = std::min<size_t>(32, size);
|
||||
|
||||
constexpr size_t kBatchLen = 32 * 2 + 2; // (blob_size, blob) * 32 + 2 spares
|
||||
iovec vec_batch[kBatchLen];
|
||||
|
||||
// for all the meta data to fill the vec batch. 10 digits for the blob size and 6 for
|
||||
// $, \r, \n, \r, \n
|
||||
absl::FixedArray<char, 64> meta((vec_len + 2) * 16); // 2 for header and next item meta data.
|
||||
|
||||
char* next = meta.data();
|
||||
char* cur_meta = next;
|
||||
*next++ = '*';
|
||||
next = absl::numbers_internal::FastIntToBuffer(size, next);
|
||||
*next++ = '\r';
|
||||
*next++ = '\n';
|
||||
|
||||
unsigned vec_indx = 0;
|
||||
const char* nullstr = NullString(is_resp3_);
|
||||
size_t nulllen = strlen(nullstr);
|
||||
auto get_pending_metabuf = [&] { return string_view{cur_meta, size_t(next - cur_meta)}; };
|
||||
|
||||
for (unsigned i = 0; i < size; ++i) {
|
||||
DCHECK_GE(meta.end() - next, 16); // We have at least 16 bytes for the meta data.
|
||||
if (resp.resp_arr[i]) {
|
||||
string_view blob = resp.resp_arr[i]->value;
|
||||
|
||||
*next++ = '$';
|
||||
next = absl::numbers_internal::FastIntToBuffer(blob.size(), next);
|
||||
*next++ = '\r';
|
||||
*next++ = '\n';
|
||||
DCHECK_GT(next - cur_meta, 0);
|
||||
|
||||
vec_batch[vec_indx++] = IoVec(get_pending_metabuf());
|
||||
vec_batch[vec_indx++] = IoVec(blob);
|
||||
cur_meta = next; // we combine the CRLF with the next item meta data.
|
||||
*next++ = '\r';
|
||||
*next++ = '\n';
|
||||
} else {
|
||||
memcpy(next, nullstr, nulllen);
|
||||
next += nulllen;
|
||||
}
|
||||
|
||||
if (vec_indx >= (kBatchLen - 2) || (meta.end() - next < 16)) {
|
||||
// we have space for at least one iovec because in the worst case we reached (kBatchLen - 3)
|
||||
// and then filled 2 vectors in the previous iteration.
|
||||
DCHECK_LE(vec_indx, kBatchLen - 1);
|
||||
|
||||
// if we do not have enough space in the meta buffer, we add the meta data to the
|
||||
// vector batch and reset it.
|
||||
if (meta.end() - next < 16) {
|
||||
vec_batch[vec_indx++] = IoVec(get_pending_metabuf());
|
||||
next = meta.data();
|
||||
cur_meta = next;
|
||||
}
|
||||
|
||||
Send(vec_batch, vec_indx);
|
||||
if (ec_)
|
||||
return;
|
||||
|
||||
vec_indx = 0;
|
||||
size_t meta_len = next - cur_meta;
|
||||
memcpy(meta.data(), cur_meta, meta_len);
|
||||
cur_meta = meta.data();
|
||||
next = cur_meta + meta_len;
|
||||
}
|
||||
}
|
||||
|
||||
if (next - cur_meta > 0) {
|
||||
vec_batch[vec_indx++] = IoVec(get_pending_metabuf());
|
||||
}
|
||||
if (vec_indx > 0)
|
||||
Send(vec_batch, vec_indx);
|
||||
}
|
||||
|
||||
void RedisReplyBuilder::SendSimpleStrArr(StrSpan arr) {
|
||||
string res = absl::StrCat("*", arr.Size(), kCRLF);
|
||||
for (string_view str : arr)
|
||||
StrAppend(&res, "+", str, kCRLF);
|
||||
|
||||
SendRaw(res);
|
||||
}
|
||||
|
||||
void RedisReplyBuilder::SendNullArray() {
|
||||
SendRaw("*-1\r\n");
|
||||
}
|
||||
|
||||
void RedisReplyBuilder::SendEmptyArray() {
|
||||
StartArray(0);
|
||||
}
|
||||
|
||||
void RedisReplyBuilder::SendStringArr(StrSpan arr, CollectionType type) {
|
||||
if (type == ARRAY && arr.Size() == 0) {
|
||||
SendRaw("*0\r\n");
|
||||
return;
|
||||
}
|
||||
|
||||
auto cb = [&](size_t i) {
|
||||
return visit([i](auto& span) { return facade::ToSV(span[i]); }, arr.span);
|
||||
};
|
||||
SendStringArrInternal(arr.Size(), std::move(cb), type);
|
||||
}
|
||||
|
||||
void RedisReplyBuilder::StartArray(unsigned len) {
|
||||
StartCollection(len, ARRAY);
|
||||
}
|
||||
|
||||
constexpr static string_view START_SYMBOLS[] = {"*", "~", "%", ">"};
|
||||
static_assert(START_SYMBOLS[RedisReplyBuilder::MAP] == "%" &&
|
||||
START_SYMBOLS[RedisReplyBuilder::SET] == "~");
|
||||
|
||||
void RedisReplyBuilder::StartCollection(unsigned len, CollectionType type) {
|
||||
if (!is_resp3_) { // Flatten for Resp2
|
||||
if (type == MAP)
|
||||
len *= 2;
|
||||
type = ARRAY;
|
||||
}
|
||||
|
||||
DVLOG(2) << "StartCollection(" << len << ", " << type << ")";
|
||||
|
||||
// We do not want to send multiple packets for small responses because these
|
||||
// trigger TCP-related artifacts (e.g. Nagle's algorithm) that slow down the delivery of the whole
|
||||
// response.
|
||||
bool prev = should_aggregate_;
|
||||
should_aggregate_ |= (len > 0);
|
||||
SendRaw(absl::StrCat(START_SYMBOLS[type], len, kCRLF));
|
||||
should_aggregate_ = prev;
|
||||
}
|
||||
|
||||
// This implementation a bit complicated because it uses vectorized
|
||||
// send to send an array. The problem with that is the OS limits vector length to UIO_MAXIOV.
|
||||
// Therefore, to make it robust we send the array in batches.
|
||||
// We limit the vector length, and when it fills up we flush it to the socket and continue
|
||||
// iterating.
|
||||
void RedisReplyBuilder::SendStringArrInternal(
|
||||
size_t size, absl::FunctionRef<std::string_view(unsigned)> producer, CollectionType type) {
|
||||
size_t header_len = size;
|
||||
string_view type_char = "*";
|
||||
if (is_resp3_) {
|
||||
type_char = START_SYMBOLS[type];
|
||||
if (type == MAP)
|
||||
header_len /= 2; // Each key value pair counts as one.
|
||||
}
|
||||
|
||||
if (header_len == 0) {
|
||||
SendRaw(absl::StrCat(type_char, "0\r\n"));
|
||||
return;
|
||||
}
|
||||
|
||||
// We limit iovec capacity, vectorized length is limited upto UIO_MAXIOV (Send returns EMSGSIZE).
|
||||
size_t vec_cap = std::min<size_t>(UIO_MAXIOV, size * 2);
|
||||
absl::FixedArray<iovec, 16> vec(vec_cap);
|
||||
absl::FixedArray<char, 128> meta(std::max<size_t>(vec_cap * 64, 128u));
|
||||
|
||||
char* start = meta.data();
|
||||
char* next = start;
|
||||
|
||||
// at most 35 chars.
|
||||
auto serialize_len = [&](char prefix, size_t len) {
|
||||
*next++ = prefix;
|
||||
next = absl::numbers_internal::FastIntToBuffer(len, next); // at most 32 chars
|
||||
*next++ = '\r';
|
||||
*next++ = '\n';
|
||||
};
|
||||
|
||||
serialize_len(type_char[0], header_len);
|
||||
unsigned vec_indx = 0;
|
||||
string_view src;
|
||||
|
||||
#define FLUSH_IOVEC() \
|
||||
do { \
|
||||
Send(vec.data(), vec_indx); \
|
||||
if (ec_) \
|
||||
return; \
|
||||
vec_indx = 0; \
|
||||
next = meta.data(); \
|
||||
} while (false)
|
||||
|
||||
for (unsigned i = 0; i < size; ++i) {
|
||||
DCHECK_LT(vec_indx, vec_cap);
|
||||
|
||||
src = producer(i);
|
||||
serialize_len('$', src.size());
|
||||
|
||||
// copy data either by referencing via an iovec or copying inline into meta buf.
|
||||
constexpr size_t kSSOLen = 32;
|
||||
if (src.size() > kSSOLen) {
|
||||
// reference metadata blob before referencing another vector.
|
||||
DCHECK_GT(next - start, 0);
|
||||
vec[vec_indx++] = IoVec(string_view{start, size_t(next - start)});
|
||||
if (vec_indx >= vec_cap) {
|
||||
FLUSH_IOVEC();
|
||||
}
|
||||
|
||||
DCHECK_LT(vec_indx, vec.size());
|
||||
vec[vec_indx++] = IoVec(src);
|
||||
if (vec_indx >= vec_cap) {
|
||||
FLUSH_IOVEC();
|
||||
}
|
||||
start = next;
|
||||
} else if (src.size() > 0) {
|
||||
// NOTE!: this is not just optimization. producer may returns a string_piece that will
|
||||
// be overriden for the next call, so we must do this for correctness.
|
||||
memcpy(next, src.data(), src.size());
|
||||
next += src.size();
|
||||
}
|
||||
|
||||
// how much buffer we need to perform the next iteration.
|
||||
constexpr ptrdiff_t kMargin = kSSOLen + 3 /* $\r\n */ + 2 /*length*/ + 2 /* \r\n */;
|
||||
|
||||
// Keep at least kMargin bytes for a small string as well as its length.
|
||||
if (kMargin >= meta.end() - next) {
|
||||
// Flush the iovec array.
|
||||
vec[vec_indx++] = IoVec(string_view{start, size_t(next - start)});
|
||||
FLUSH_IOVEC();
|
||||
start = next;
|
||||
}
|
||||
*next++ = '\r';
|
||||
*next++ = '\n';
|
||||
}
|
||||
|
||||
vec[vec_indx].iov_base = start;
|
||||
vec[vec_indx].iov_len = next - start;
|
||||
Send(vec.data(), vec_indx + 1);
|
||||
}
void RedisReplyBuilder2Base::SendNull() {
  ReplyScope scope(this);
  has_replied_ = true;
  resp3_ ? WritePieces(kNullStringR3) : WritePieces(kNullStringR2);
}

void RedisReplyBuilder2Base::SendSimpleString(std::string_view str) {
void RedisReplyBuilderBase::SendSimpleString(std::string_view str) {
  ReplyScope scope(this);
  has_replied_ = true;
  if (str.size() <= kMaxInlineSize * 2)
    return WritePieces(kSimplePref, str, kCRLF);

@ -861,9 +269,8 @@ void RedisReplyBuilder2Base::SendSimpleString(std::string_view str) {
  WritePieces(kCRLF);
}

void RedisReplyBuilder2Base::SendBulkString(std::string_view str) {
void RedisReplyBuilderBase::SendBulkString(std::string_view str) {
  ReplyScope scope(this);
  has_replied_ = true;
  if (str.size() <= kMaxInlineSize)
    return WritePieces(kLengthPrefix, uint32_t(str.size()), kCRLF, str, kCRLF);

@ -872,14 +279,12 @@ void RedisReplyBuilder2Base::SendBulkString(std::string_view str) {
  WritePieces(kCRLF);
}

void RedisReplyBuilder2Base::SendLong(long val) {
void RedisReplyBuilderBase::SendLong(long val) {
  ReplyScope scope(this);
  has_replied_ = true;
  WritePieces(kLongPref, val, kCRLF);
}

void RedisReplyBuilder2Base::SendDouble(double val) {
  has_replied_ = true;
void RedisReplyBuilderBase::SendDouble(double val) {
  char buf[DoubleToStringConverter::kBase10MaximalLength + 8];  // +8 to be on the safe side.
  static_assert(ABSL_ARRAYSIZE(buf) < kMaxInlineSize, "Write temporary string from buf inline");
  string_view val_str = FormatDouble(val, buf, ABSL_ARRAYSIZE(buf));

@ -891,18 +296,16 @@ void RedisReplyBuilder2Base::SendDouble(double val) {
  WritePieces(kDoublePref, val_str, kCRLF);
}

void RedisReplyBuilder2Base::SendNullArray() {
void RedisReplyBuilderBase::SendNullArray() {
  ReplyScope scope(this);
  has_replied_ = true;
  WritePieces("*-1", kCRLF);
}

constexpr static const char START_SYMBOLS2[4][2] = {"*", "~", "%", ">"};
static_assert(START_SYMBOLS2[RedisReplyBuilder2Base::MAP][0] == '%' &&
              START_SYMBOLS2[RedisReplyBuilder2Base::SET][0] == '~');
static_assert(START_SYMBOLS2[RedisReplyBuilderBase::MAP][0] == '%' &&
              START_SYMBOLS2[RedisReplyBuilderBase::SET][0] == '~');

void RedisReplyBuilder2Base::StartCollection(unsigned len, CollectionType ct) {
  has_replied_ = true;
void RedisReplyBuilderBase::StartCollection(unsigned len, CollectionType ct) {
  if (!IsResp3()) {  // RESP2 supports only arrays
    if (ct == MAP)
      len *= 2;

@ -912,9 +315,8 @@ void RedisReplyBuilder2Base::StartCollection(unsigned len, CollectionType ct) {
  WritePieces(START_SYMBOLS2[ct], len, kCRLF);
}

void RedisReplyBuilder2Base::SendError(std::string_view str, std::string_view type) {
void RedisReplyBuilderBase::SendError(std::string_view str, std::string_view type) {
  ReplyScope scope(this);
  has_replied_ = true;

  if (type.empty()) {
    type = str;

@ -930,18 +332,17 @@ void RedisReplyBuilder2Base::SendError(std::string_view str, std::string_view ty
  WritePieces(str, kCRLF);
}

void RedisReplyBuilder2Base::SendProtocolError(std::string_view str) {
void RedisReplyBuilderBase::SendProtocolError(std::string_view str) {
  SendError(absl::StrCat("-ERR Protocol error: ", str), "protocol_error");
}

char* RedisReplyBuilder2Base::FormatDouble(double d, char* dest, unsigned len) {
char* RedisReplyBuilderBase::FormatDouble(double d, char* dest, unsigned len) {
  StringBuilder sb(dest, len);
  CHECK(dfly_conv.ToShortest(d, &sb));
  return sb.Finalize();
}
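FormatDouble delegates to the double-conversion library's shortest-roundtrip formatter. A sketch of the same call pattern; dfly_conv above is a converter configured by Dragonfly, so the stock EcmaScriptConverter here is only a stand-in:

#include <double-conversion/double-conversion.h>

char* FormatShortest(double d, char* dest, int len) {
  using double_conversion::DoubleToStringConverter;
  using double_conversion::StringBuilder;

  StringBuilder sb(dest, len);
  // Produces the shortest string that round-trips back to exactly `d`.
  if (!DoubleToStringConverter::EcmaScriptConverter().ToShortest(d, &sb))
    return nullptr;      // buffer too small
  return sb.Finalize();  // NUL-terminates and returns dest
}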
void RedisReplyBuilder2Base::SendVerbatimString(std::string_view str, VerbatimFormat format) {
  has_replied_ = true;
void RedisReplyBuilderBase::SendVerbatimString(std::string_view str, VerbatimFormat format) {
  DCHECK(format <= VerbatimFormat::MARKDOWN);
  if (!IsResp3())
    return SendBulkString(str);

@ -955,26 +356,26 @@ void RedisReplyBuilder2Base::SendVerbatimString(std::string_view str, VerbatimFo
  WritePieces(kCRLF);
}

std::string RedisReplyBuilder2Base::SerializeCommand(std::string_view command) {
std::string RedisReplyBuilderBase::SerializeCommand(std::string_view command) {
  return string{command} + kCRLF;
}

void RedisReplyBuilder2::SendSimpleStrArr2(const facade::ArgRange& strs) {
void RedisReplyBuilder::SendSimpleStrArr(const facade::ArgRange& strs) {
  ReplyScope scope(this);
  StartArray(strs.Size());
  for (std::string_view str : strs)
    SendSimpleString(str);
}

void RedisReplyBuilder2::SendBulkStrArr(const facade::ArgRange& strs, CollectionType ct) {
void RedisReplyBuilder::SendBulkStrArr(const facade::ArgRange& strs, CollectionType ct) {
  ReplyScope scope(this);
  StartCollection(ct == CollectionType::MAP ? strs.Size() / 2 : strs.Size(), ct);
  for (std::string_view str : strs)
    SendBulkString(str);
}

void RedisReplyBuilder2::SendScoredArray(absl::Span<const std::pair<std::string, double>> arr,
                                         bool with_scores) {
void RedisReplyBuilder::SendScoredArray(absl::Span<const std::pair<std::string, double>> arr,
                                        bool with_scores) {
  ReplyScope scope(this);
  StartArray((with_scores && !IsResp3()) ? arr.size() * 2 : arr.size());
  for (const auto& [str, score] : arr) {

@ -986,31 +387,20 @@ void RedisReplyBuilder2::SendScoredArray(absl::Span<const std::pair<std::string,
  }
}

void RedisReplyBuilder2::SendStored() {
void RedisReplyBuilder::SendStored() {
  SendSimpleString("OK");
}

void RedisReplyBuilder2::SendSetSkipped() {
void RedisReplyBuilder::SendSetSkipped() {
  SendNull();
}

void RedisReplyBuilder2::StartArray(unsigned len) {
void RedisReplyBuilder::StartArray(unsigned len) {
  StartCollection(len, CollectionType::ARRAY);
}

void RedisReplyBuilder2::SendEmptyArray() {
void RedisReplyBuilder::SendEmptyArray() {
  StartArray(0);
}

void RedisReplyBuilder2::SendMGetResponse(SinkReplyBuilder::MGetResponse resp) {
  ReplyScope scope(this);
  StartArray(resp.resp_arr.size());
  for (const auto& entry : resp.resp_arr) {
    if (entry)
      SendBulkString(entry->value);
    else
      SendNull();
  }
}

}  // namespace facade
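A hedged usage sketch of the renamed builder; the call sequence mirrors the test fixture further below, and the commented wire bytes assume a RESP2 client:

#include "facade/reply_builder.h"

// Sends ["value-1", nil] as a RESP2 array: "*2\r\n$7\r\nvalue-1\r\n$-1\r\n".
void ReplyExample(facade::RedisReplyBuilder* rb) {
  rb->SetResp3(false);            // RESP2 client
  rb->StartArray(2);              // *2
  rb->SendBulkString("value-1");  // $7 value-1
  rb->SendNull();                 // $-1 under RESP2
}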
@ -21,188 +21,28 @@ enum class ReplyMode {
  FULL  // All replies are recorded
};

class SinkReplyBuilder {
 public:
  struct MGetStorage {
    MGetStorage* next = nullptr;
    char data[1];
  };

  struct GetResp {
    std::string key;  // TODO: to use backing storage to optimize this as well.
    std::string_view value;

    uint64_t mc_ver = 0;  // 0 means we do not output it (i.e. it has not been requested).
    uint32_t mc_flag = 0;

    GetResp() = default;
    GetResp(std::string_view val) : value(val) {
    }
  };

  struct MGetResponse {
    MGetStorage* storage_list = nullptr;  // backing storage of resp_arr values.
    std::vector<std::optional<GetResp>> resp_arr;

    MGetResponse() = default;

    MGetResponse(size_t size) : resp_arr(size) {
    }

    ~MGetResponse();

    MGetResponse(MGetResponse&& other) noexcept
        : storage_list(other.storage_list), resp_arr(std::move(other.resp_arr)) {
      other.storage_list = nullptr;
    }

    MGetResponse& operator=(MGetResponse&& other) noexcept {
      resp_arr = std::move(other.resp_arr);
      storage_list = other.storage_list;
      other.storage_list = nullptr;
      return *this;
    }
  };

  SinkReplyBuilder(const SinkReplyBuilder&) = delete;
  void operator=(const SinkReplyBuilder&) = delete;

  enum Type { REDIS, MC };

  explicit SinkReplyBuilder(::io::Sink* sink, Type t);

  virtual ~SinkReplyBuilder() {
  }

  static MGetStorage* AllocMGetStorage(size_t size) {
    static_assert(alignof(MGetStorage) == 8);  // if this breaks we should fix the code below.
    char* buf = new char[size + sizeof(MGetStorage)];
    return new (buf) MGetStorage();
  }

  virtual void SendError(std::string_view str, std::string_view type = {}) = 0;  // MC and Redis
  virtual void SendError(OpStatus status);
  void SendError(ErrorReply error);

  virtual void SendStored() = 0;  // Reply for set commands.
  virtual void SendSetSkipped() = 0;

  virtual void SendMGetResponse(MGetResponse resp) = 0;

  virtual void SendLong(long val) = 0;
  virtual void SendSimpleString(std::string_view str) = 0;

  void SendOk() {
    SendSimpleString("OK");
  }

  virtual void SendProtocolError(std::string_view str) = 0;

  // In order to reduce the interrupt rate we allow coalescing responses together using
  // Batch mode. It is controlled by the Connection state machine because it makes sense only
  // when pipelined requests are arriving.
  virtual void SetBatchMode(bool batch);

  virtual void FlushBatch();

  // Used for QUIT -> should move to conn_context?
  virtual void CloseConnection();

  virtual std::error_code GetError() const {
    return ec_;
  }

  bool IsSendActive() const {
    return send_active_;  // BROKEN
  }

  struct ReplyAggregator {
    explicit ReplyAggregator(SinkReplyBuilder* builder) : builder_(builder) {
      // If the builder is already aggregating then don't aggregate again as
      // this will cause redundant sink writes (such as in a MULTI/EXEC).
      if (builder->should_aggregate_) {
        return;
      }
      builder_->StartAggregate();
      is_nested_ = false;
    }

    ~ReplyAggregator() {
      if (!is_nested_) {
        builder_->StopAggregate();
      }
    }

   private:
    SinkReplyBuilder* builder_;
    bool is_nested_ = true;
  };

  void ExpectReply();
  bool HasReplied() const {
    return has_replied_;
  }

  virtual size_t UsedMemory() const;

  static const ReplyStats& GetThreadLocalStats() {
    return tl_facade_stats->reply_stats;
  }

  virtual void StartAggregate();
  virtual void StopAggregate();

  std::string ConsumeLastError() {
    return std::exchange(last_error_, std::string{});
  }

  Type type() const {
    return type_;
  }

 protected:
  void SendRaw(std::string_view str);  // Sends raw without any formatting.

  void Send(const iovec* v, uint32_t len);

  std::string batch_;
  ::io::Sink* sink_;
  std::error_code ec_;

  // msg and kind/type
  std::string last_error_;

  bool should_batch_ : 1;

  // Similar to batch mode, but controlled at the operation level.
  bool should_aggregate_ : 1;
  bool has_replied_ : 1;
  bool send_active_ : 1;
  Type type_;
};
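The SetBatchMode/FlushBatch pair above exists to coalesce pipelined replies into fewer sink writes. An illustrative sketch of how a connection loop might drive it; HandlePipeline and the dispatch step are assumptions, not the real connection code:

void HandlePipeline(facade::SinkReplyBuilder* rb, size_t queued_requests) {
  // Buffer replies while more parsed requests are waiting in the queue.
  rb->SetBatchMode(queued_requests > 1);
  // ... dispatch each queued command; every handler replies through rb ...
  rb->SetBatchMode(false);
  rb->FlushBatch();  // one write() for the whole pipeline instead of one per reply
}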
// Base class for all reply builders. Offers a simple high level interface for controlling output
// modes and sending basic response types.
class SinkReplyBuilder2 {
class SinkReplyBuilder {
  struct GuardBase {
    bool prev;
    SinkReplyBuilder2* rb;
    SinkReplyBuilder* rb;
  };

 public:
  constexpr static size_t kMaxInlineSize = 32;
  constexpr static size_t kMaxBufferSize = 8192;

  explicit SinkReplyBuilder2(io::Sink* sink) : sink_(sink) {
  explicit SinkReplyBuilder(io::Sink* sink) : sink_(sink) {
  }

  virtual ~SinkReplyBuilder2() = default;
  virtual ~SinkReplyBuilder() = default;

  // USE WITH CARE! ReplyScope assumes that all string views in Send calls stay valid for the
  // scope's lifetime. This allows the builder to avoid copies by enqueueing long strings
  // directly for vectorized io.
  struct ReplyScope : GuardBase {
    explicit ReplyScope(SinkReplyBuilder2* rb) : GuardBase{std::exchange(rb->scoped_, true), rb} {
    explicit ReplyScope(SinkReplyBuilder* rb) : GuardBase{std::exchange(rb->scoped_, true), rb} {
    }

    ~ReplyScope();

@ -211,7 +51,7 @@ class SinkReplyBuilder2 {
  // Aggregator reduces the number of raw send calls by copying data into an intermediate buffer.
  // Prefer ReplyScope if possible to additionally reduce the number of copies.
  struct ReplyAggregator : GuardBase {
    explicit ReplyAggregator(SinkReplyBuilder2* rb)
    explicit ReplyAggregator(SinkReplyBuilder* rb)
        : GuardBase{std::exchange(rb->batched_, true), rb} {
    }

@ -228,7 +68,11 @@ class SinkReplyBuilder2 {
    return buffer_.Capacity();
  }

  bool IsSendActive() {
  size_t RepliesRecorded() const {
    return replies_recorded_;
  }

  bool IsSendActive() const {
    return send_active_;
  }

@ -243,6 +87,8 @@ class SinkReplyBuilder2 {
  }

 public:  // High level interface
  virtual Protocol GetProtocol() const = 0;

  virtual void SendLong(long val) = 0;
  virtual void SendSimpleString(std::string_view str) = 0;

@ -257,6 +103,10 @@ class SinkReplyBuilder2 {
  void SendError(ErrorReply error);
  virtual void SendProtocolError(std::string_view str) = 0;

  std::string ConsumeLastError() {
    return std::exchange(last_error_, {});
  }

 protected:
  template <typename... Ts>
  void WritePieces(Ts&&... pieces);  // Copy pieces into buffer and reference buffer

@ -267,11 +117,15 @@ class SinkReplyBuilder2 {

  void Send();

 protected:
  size_t replies_recorded_ = 0;
  std::string last_error_;

 private:
  io::Sink* sink_;
  std::error_code ec_;

  bool send_active_ = false;
  bool send_active_ = false;  // set while Send() is suspended on a socket write
  bool scoped_ = false, batched_ = false;

  size_t total_size_ = 0;  // sum of vec_ lengths

@ -285,40 +139,15 @@ class SinkReplyBuilder2 {
};

class MCReplyBuilder : public SinkReplyBuilder {
  bool noreply_;

 public:
  MCReplyBuilder(::io::Sink* stream);
  explicit MCReplyBuilder(::io::Sink* sink);

  using SinkReplyBuilder::SendRaw;
  ~MCReplyBuilder() override = default;

  void SendError(std::string_view str, std::string_view type = std::string_view{}) final;

  // void SendGetReply(std::string_view key, uint32_t flags, std::string_view value) final;
  void SendMGetResponse(MGetResponse resp) final;

  void SendStored() final;
  void SendLong(long val) final;
  void SendSetSkipped() final;

  void SendClientError(std::string_view str);
  void SendNotFound();
  void SendSimpleString(std::string_view str) final;
  void SendProtocolError(std::string_view str) final;

  void SetNoreply(bool noreply) {
    noreply_ = noreply;
  Protocol GetProtocol() const final {
    return Protocol::MEMCACHE;
  }

  bool NoReply() const;
};

class MCReplyBuilder2 : public SinkReplyBuilder2 {
 public:
  explicit MCReplyBuilder2(::io::Sink* sink);

  ~MCReplyBuilder2() override = default;

  void SendError(std::string_view str, std::string_view type = std::string_view{}) final;

  void SendStored() final;

@ -332,6 +161,8 @@ class MCReplyBuilder2 : public SinkReplyBuilder2 {
  void SendSimpleString(std::string_view str) final;
  void SendProtocolError(std::string_view str) final;

  void SendRaw(std::string_view str);

  void SetNoreply(bool noreply) {
    noreply_ = noreply;
  }

@ -344,161 +175,72 @@ class MCReplyBuilder2 : public SinkReplyBuilder2 {
  bool noreply_ = false;
};

class RedisReplyBuilder : public SinkReplyBuilder {
// Redis reply builder interface for sending RESP data.
class RedisReplyBuilderBase : public SinkReplyBuilder {
 public:
  enum CollectionType { ARRAY, SET, MAP, PUSH };

  enum VerbatimFormat { TXT, MARKDOWN };

  using StrSpan = facade::ArgRange;

  RedisReplyBuilder(::io::Sink* stream);

  virtual void SetResp3(bool is_resp3);
  virtual bool IsResp3() const {
    return is_resp3_;
  explicit RedisReplyBuilderBase(io::Sink* sink) : SinkReplyBuilder(sink) {
  }

  void SendError(std::string_view str, std::string_view type = {}) override;
  using SinkReplyBuilder::SendError;
  ~RedisReplyBuilderBase() override = default;

  void SendMGetResponse(MGetResponse resp) override;

  void SendStored() override;
  void SendSetSkipped() override;
  void SendProtocolError(std::string_view str) override;

  virtual void SendNullArray();   // Send *-1
  virtual void SendEmptyArray();  // Send *0
  virtual void SendSimpleStrArr(StrSpan arr);
  virtual void SendStringArr(StrSpan arr, CollectionType type = ARRAY);
  Protocol GetProtocol() const final {
    return Protocol::REDIS;
  }

  virtual void SendNull();
  void SendLong(long val) override;
  virtual void SendDouble(double val);
  void SendSimpleString(std::string_view str) override;

  virtual void SendBulkString(std::string_view str);
  virtual void SendVerbatimString(std::string_view str, VerbatimFormat format = TXT);
  virtual void SendScoredArray(absl::Span<const std::pair<std::string, double>> arr,
                               bool with_scores);

  void StartArray(unsigned len);  // StartCollection(len, ARRAY)

  virtual void StartCollection(unsigned len, CollectionType type);

  static char* FormatDouble(double val, char* dest, unsigned dest_len);

 private:
  void SendStringArrInternal(size_t size, absl::FunctionRef<std::string_view(unsigned)> producer,
                             CollectionType type);

  bool is_resp3_ = false;
};

// Redis reply builder interface for sending RESP data.
class RedisReplyBuilder2Base : public SinkReplyBuilder2, public RedisReplyBuilder {
 public:
  using CollectionType = RedisReplyBuilder::CollectionType;
  using VerbatimFormat = RedisReplyBuilder::VerbatimFormat;

  explicit RedisReplyBuilder2Base(io::Sink* sink)
      : SinkReplyBuilder2(sink), RedisReplyBuilder(nullptr) {
  }

  ~RedisReplyBuilder2Base() override = default;

  void SendNull() override;

  void SendSimpleString(std::string_view str) override;
  void SendBulkString(std::string_view str) override;  // RESP: Blob String
  virtual void SendBulkString(std::string_view str);   // RESP: Blob String

  void SendLong(long val) override;
  void SendDouble(double val) override;  // RESP: Number
  virtual void SendDouble(double val);   // RESP: Number

  void SendNullArray() override;
  void StartCollection(unsigned len, CollectionType ct) override;
  virtual void SendNullArray();
  virtual void StartCollection(unsigned len, CollectionType ct);

  using SinkReplyBuilder2::SendError;
  using SinkReplyBuilder::SendError;
  void SendError(std::string_view str, std::string_view type = {}) override;
  void SendProtocolError(std::string_view str) override;

  virtual void SendVerbatimString(std::string_view str, VerbatimFormat format = TXT) override;
  virtual void SendVerbatimString(std::string_view str, VerbatimFormat format = TXT);

  static char* FormatDouble(double d, char* dest, unsigned len);
  static std::string SerializeCommand(std::string_view command);

  bool IsResp3() const override {
  bool IsResp3() const {
    return resp3_;
  }

  // REMOVE THIS override
  void SetResp3(bool resp3) override {
  void SetResp3(bool resp3) {
    resp3_ = resp3;
  }

  // REMOVE THIS
  void SetBatchMode(bool mode) override {
    SinkReplyBuilder2::SetBatchMode(mode);
  }

  void StartAggregate() override {
    aggregators_.emplace_back(this);
  }

  void StopAggregate() override {
    aggregators_.pop_back();
  }

  void FlushBatch() override {
    SinkReplyBuilder2::Flush();
  }

  // REMOVE THIS

  void CloseConnection() override {
    SinkReplyBuilder2::CloseConnection();
  }

  std::error_code GetError() const override {
    return SinkReplyBuilder2::GetError();
  }

 private:
  std::vector<SinkReplyBuilder2::ReplyAggregator> aggregators_;
  bool resp3_ = false;
};

// Non-essential rediss reply builder functions implemented on top of the base resp protocol
class RedisReplyBuilder2 : public RedisReplyBuilder2Base {
// Non-essential redis reply builder functions implemented on top of the base resp protocol
class RedisReplyBuilder : public RedisReplyBuilderBase {
 public:
  RedisReplyBuilder2(io::Sink* sink) : RedisReplyBuilder2Base(sink) {
  using RedisReplyBuilderBase::CollectionType;

  RedisReplyBuilder(io::Sink* sink) : RedisReplyBuilderBase(sink) {
  }

  ~RedisReplyBuilder2() override = default;

  void SendSimpleStrArr2(const facade::ArgRange& strs);
  ~RedisReplyBuilder() override = default;

  void SendSimpleStrArr(const facade::ArgRange& strs);
  void SendBulkStrArr(const facade::ArgRange& strs, CollectionType ct = ARRAY);
  void SendScoredArray(absl::Span<const std::pair<std::string, double>> arr,
                       bool with_scores) override;

  void SendSimpleStrArr(RedisReplyBuilder::StrSpan arr) override {
    SendSimpleStrArr2(arr);
  }

  void SendStringArr(RedisReplyBuilder::StrSpan arr, CollectionType type = ARRAY) override {
    SendBulkStrArr(arr, type);
  }
  void SendScoredArray(absl::Span<const std::pair<std::string, double>> arr, bool with_scores);

  void SendStored() final;
  void SendSetSkipped() final;

  void StartArray(unsigned len);
  void SendEmptyArray() override;

  // TODO: Remove
  void SendMGetResponse(SinkReplyBuilder::MGetResponse resp) override;
  void SendEmptyArray();
};

}  // namespace facade
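As the USE WITH CARE comment warns, ReplyScope trades copies for a lifetime requirement on the views it enqueues. A sketch of safe usage; the container and function name here are assumptions for illustration:

#include <string>
#include <vector>

void SendRows(facade::RedisReplyBuilder* rb, const std::vector<std::string>& rows) {
  facade::SinkReplyBuilder::ReplyScope scope(rb);  // flushes when it leaves scope
  rb->StartArray(rows.size());
  for (const std::string& row : rows)
    rb->SendBulkString(row);  // safe: `rows` outlives the scope, so the views stay valid
}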
@ -83,7 +83,7 @@ class RedisReplyBuilderTest : public testing::Test {

  void SetUp() {
    sink_.Clear();
    builder_.reset(new RedisReplyBuilder2(&sink_));
    builder_.reset(new RedisReplyBuilder(&sink_));
    ResetStats();
  }

@ -112,7 +112,7 @@ class RedisReplyBuilderTest : public testing::Test {
  }

  unsigned GetError(string_view err) const {
    const auto& map = SinkReplyBuilder2::GetThreadLocalStats().err_count;
    const auto& map = SinkReplyBuilder::GetThreadLocalStats().err_count;
    auto it = map.find(err);
    return it == map.end() ? 0 : it->second;
  }

@ -135,7 +135,7 @@ class RedisReplyBuilderTest : public testing::Test {
  ParsingResults Parse();

  io::StringSink sink_;
  std::unique_ptr<RedisReplyBuilder2> builder_;
  std::unique_ptr<RedisReplyBuilder> builder_;
  std::unique_ptr<std::uint8_t[]> parser_buffer_;
};

@ -205,7 +205,7 @@ RedisReplyBuilderTest::ParsingResults RedisReplyBuilderTest::Parse() {

TEST_F(RedisReplyBuilderTest, MessageSend) {
  // Test each message that is "sent" to the sink
  builder_->SinkReplyBuilder2::SendOk();
  builder_->SinkReplyBuilder::SendOk();
  ASSERT_EQ(TakePayload(), kOKMessage);
  builder_->StartArray(10);

@ -783,7 +783,7 @@ TEST_F(RedisReplyBuilderTest, BasicCapture) {
  string_view kTestSws[] = {"a1"sv, "a2"sv, "a3"sv, "a4"sv};

  CapturingReplyBuilder crb{};
  using RRB = RedisReplyBuilder2;
  using RRB = RedisReplyBuilder;

  auto big_arr_cb = [](RRB* r) {
    r->StartArray(4);

@ -870,12 +870,12 @@ TEST_F(RedisReplyBuilderTest, VerbatimString) {
  std::string str = "A simple string!";

  builder_->SetResp3(true);
  builder_->SendVerbatimString(str, RedisReplyBuilder2::VerbatimFormat::TXT);
  builder_->SendVerbatimString(str, RedisReplyBuilder::VerbatimFormat::TXT);
  ASSERT_TRUE(NoErrors());
  ASSERT_EQ(TakePayload(), "=20\r\ntxt:A simple string!\r\n") << "Resp3 VerbatimString TXT failed.";

  builder_->SetResp3(true);
  builder_->SendVerbatimString(str, RedisReplyBuilder2::VerbatimFormat::MARKDOWN);
  builder_->SendVerbatimString(str, RedisReplyBuilder::VerbatimFormat::MARKDOWN);
  ASSERT_TRUE(NoErrors());
  ASSERT_EQ(TakePayload(), "=20\r\nmkd:A simple string!\r\n")
      << "Resp3 VerbatimString MARKDOWN failed.";

@ -8,7 +8,7 @@
#include "reply_capture.h"

#define SKIP_LESS(needed) \
  has_replied_ = true;    \
  replies_recorded_++;    \
  if (reply_mode_ < needed) { \
    current_ = monostate{};   \
    return;                   \

@ -23,44 +23,11 @@ void CapturingReplyBuilder::SendError(std::string_view str, std::string_view typ
  Capture(Error{str, type});
}

void CapturingReplyBuilder::SendMGetResponse(MGetResponse resp) {
  SKIP_LESS(ReplyMode::FULL);
  Capture(std::move(resp));
}

void CapturingReplyBuilder::SendError(OpStatus status) {
  if (status != OpStatus::OK) {
    last_error_ = StatusToMsg(status);
  }
  SKIP_LESS(ReplyMode::ONLY_ERR);
  Capture(status);
}

void CapturingReplyBuilder::SendNullArray() {
  SKIP_LESS(ReplyMode::FULL);
  Capture(unique_ptr<CollectionPayload>{nullptr});
}

void CapturingReplyBuilder::SendEmptyArray() {
  SKIP_LESS(ReplyMode::FULL);
  Capture(make_unique<CollectionPayload>(0, ARRAY));
}

void CapturingReplyBuilder::SendSimpleStrArr(StrSpan arr) {
  SKIP_LESS(ReplyMode::FULL);
  DCHECK_EQ(current_.index(), 0u);

  Capture(StrArrPayload{true, ARRAY, {arr.begin(), arr.end()}});
}

void CapturingReplyBuilder::SendStringArr(StrSpan arr, CollectionType type) {
  SKIP_LESS(ReplyMode::FULL);
  DCHECK_EQ(current_.index(), 0u);

  // TODO: 1. Allocate all strings at once 2. Allow movable types
  Capture(StrArrPayload{false, type, {arr.begin(), arr.end()}});
}

void CapturingReplyBuilder::SendNull() {
  SKIP_LESS(ReplyMode::FULL);
  Capture(nullptr_t{});

@ -86,13 +53,6 @@ void CapturingReplyBuilder::SendBulkString(std::string_view str) {
  Capture(BulkString{string{str}});
}

void CapturingReplyBuilder::SendScoredArray(absl::Span<const std::pair<std::string, double>> arr,
                                            bool with_scores) {
  SKIP_LESS(ReplyMode::FULL);
  std::vector<std::pair<std::string, double>> values(arr.begin(), arr.end());
  Capture(ScoredArray{std::move(values), with_scores});
}

void CapturingReplyBuilder::StartCollection(unsigned len, CollectionType type) {
  SKIP_LESS(ReplyMode::FULL);
  stack_.emplace(make_unique<CollectionPayload>(len, type), type == MAP ? len * 2 : len);

@ -109,8 +69,8 @@ CapturingReplyBuilder::Payload CapturingReplyBuilder::Take() {
}

void CapturingReplyBuilder::SendDirect(Payload&& val) {
  has_replied_ = !holds_alternative<monostate>(val);
  bool is_err = holds_alternative<Error>(val) || holds_alternative<OpStatus>(val);
  replies_recorded_ += !holds_alternative<monostate>(val);
  bool is_err = holds_alternative<Error>(val);
  ReplyMode min_mode = is_err ? ReplyMode::ONLY_ERR : ReplyMode::FULL;
  if (reply_mode_ >= min_mode) {
    DCHECK_EQ(current_.index(), 0u);

@ -178,13 +138,6 @@ struct CaptureVisitor {
    rb->SendError(status);
  }

  void operator()(const CapturingReplyBuilder::StrArrPayload& sa) {
    if (sa.simple)
      rb->SendSimpleStrArr(sa.arr);
    else
      rb->SendStringArr(sa.arr, sa.type);
  }

  void operator()(const unique_ptr<CapturingReplyBuilder::CollectionPayload>& cp) {
    if (!cp) {
      rb->SendNullArray();

@ -199,14 +152,6 @@ struct CaptureVisitor {
    visit(*this, std::move(pl));
  }

  void operator()(SinkReplyBuilder::MGetResponse resp) {
    rb->SendMGetResponse(std::move(resp));
  }

  void operator()(const CapturingReplyBuilder::ScoredArray& sarr) {
    rb->SendScoredArray(sarr.arr, sarr.with_scores);
  }

  RedisReplyBuilder* rb;
};
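CaptureVisitor replays one captured variant alternative per operator() overload. A self-contained sketch of the same std::visit dispatch with a reduced payload type:

#include <iostream>
#include <string>
#include <variant>

using Payload = std::variant<std::monostate, long, std::string>;

struct Replay {
  void operator()(std::monostate) {}  // nothing was captured
  void operator()(long v) { std::cout << ":" << v << "\r\n"; }
  void operator()(const std::string& s) {
    std::cout << "$" << s.size() << "\r\n" << s << "\r\n";
  }
};

void Apply(Payload&& pl) {
  std::visit(Replay{}, std::move(pl));  // dispatches to exactly one overload
}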
@ -219,7 +164,7 @@ void CapturingReplyBuilder::Apply(Payload&& pl, RedisReplyBuilder* rb) {
  CaptureVisitor cv{rb};
  visit(cv, std::move(pl));
  // Consumed and printed by InvokeCmd. We just send the actual error here.
  std::ignore = rb->ConsumeLastError();
  rb->ConsumeLastError();
}

void CapturingReplyBuilder::SetReplyMode(ReplyMode mode) {

@ -23,47 +23,23 @@ class CapturingReplyBuilder : public RedisReplyBuilder {
  friend struct CaptureVisitor;

 public:
  void SendError(std::string_view str, std::string_view type = {}) override;
  void SendMGetResponse(MGetResponse resp) override;

  // SendStored -> SendSimpleString("OK")
  // SendSetSkipped -> SendNull()
  void SendError(OpStatus status) override;
  using RedisReplyBuilder::SendError;
  void SendError(std::string_view str, std::string_view type) override;

  void SendNullArray() override;
  void SendEmptyArray() override;
  void SendSimpleStrArr(StrSpan arr) override;
  void SendStringArr(StrSpan arr, CollectionType type = ARRAY) override;

  void SendNull() override;
  void SendLong(long val) override;
  void SendDouble(double val) override;
  void SendSimpleString(std::string_view str) override;

  void SendBulkString(std::string_view str) override;
  void SendScoredArray(absl::Span<const std::pair<std::string, double>> arr,
                       bool with_scores) override;

  void StartCollection(unsigned len, CollectionType type) override;
  void SendNullArray() override;
  void SendNull() override;

 public:
  using Error = std::pair<std::string, std::string>;  // SendError (msg, type)
  using Null = std::nullptr_t;                        // SendNull or SendNullArray

  struct StrArrPayload {
    bool simple;
    CollectionType type;
    std::vector<std::string> arr;
  };

  struct CollectionPayload;

  struct ScoredArray {
    std::vector<std::pair<std::string, double>> arr;
    bool with_scores;
  };

  struct SimpleString : public std::string {};  // SendSimpleString
  struct BulkString : public std::string {};    // SendBulkString

@ -71,9 +47,8 @@ class CapturingReplyBuilder : public RedisReplyBuilder {
      : RedisReplyBuilder{nullptr}, reply_mode_{mode}, stack_{}, current_{} {
  }

  using Payload =
      std::variant<std::monostate, Null, Error, OpStatus, long, double, SimpleString, BulkString,
                   StrArrPayload, std::unique_ptr<CollectionPayload>, MGetResponse, ScoredArray>;
  using Payload = std::variant<std::monostate, Null, Error, long, double, SimpleString, BulkString,
                               std::unique_ptr<CollectionPayload>>;

  // Non-owned Error based on SendError arguments (msg, type)
  using ErrorRef = std::pair<std::string_view, std::string_view>;

@ -36,7 +36,7 @@ class ServiceInterface {
  virtual void DispatchMC(const MemcacheParser::Command& cmd, std::string_view value,
                          MCReplyBuilder* builder, ConnectionContext* cntx) = 0;

  virtual ConnectionContext* CreateContext(util::FiberSocketBase* peer, Connection* owner) = 0;
  virtual ConnectionContext* CreateContext(Connection* owner) = 0;

  virtual void ConfigureHttpHandlers(util::HttpListenerBase* base, bool is_privileged) {
  }

@ -27,6 +27,7 @@
#include "base/logging.h"
#include "core/overloaded.h"
#include "facade/dragonfly_connection.h"
#include "facade/dragonfly_listener.h"
#include "facade/facade_types.h"
#include "io/file.h"
#include "io/file_util.h"

@ -102,14 +103,13 @@ void AclFamily::StreamUpdatesToAllProactorConnections(const std::string& user,
  auto update_cb = [&]([[maybe_unused]] size_t id, util::Connection* conn) {
    DCHECK(conn);
    auto connection = static_cast<facade::Connection*>(conn);
    if (connection->protocol() == facade::Protocol::REDIS && !connection->IsHttp() &&
        connection->cntx()) {
    if (!connection->IsHttp() && connection->cntx()) {
      connection->SendAclUpdateAsync(
          facade::Connection::AclUpdateMessage{user, update_commands, update_keys, update_pub_sub});
    }
  };

  if (main_listener_) {
  if (main_listener_ && main_listener_->protocol() == facade::Protocol::REDIS) {
    main_listener_->TraverseConnections(update_cb);
  }
}

@ -10,7 +10,6 @@
#include <vector>

#include "absl/container/flat_hash_set.h"
#include "facade/dragonfly_listener.h"
#include "facade/facade_types.h"
#include "helio/util/proactor_pool.h"
#include "server/acl/acl_commands_def.h"

@ -20,6 +19,7 @@

namespace facade {
class SinkReplyBuilder;
class Listener;
}  // namespace facade

namespace dfly {

@ -61,7 +61,7 @@ void BlockingControllerTest::SetUp() {
    arg_vec_.emplace_back(s);
  }

  trans_->InitByArgs(&namespaces.GetDefaultNamespace(), 0, {arg_vec_.data(), arg_vec_.size()});
  trans_->InitByArgs(&namespaces->GetDefaultNamespace(), 0, {arg_vec_.data(), arg_vec_.size()});
  CHECK_EQ(0u, Shard("x", shard_set->size()));
  CHECK_EQ(2u, Shard("z", shard_set->size()));

@ -71,7 +71,6 @@ void BlockingControllerTest::SetUp() {

void BlockingControllerTest::TearDown() {
  shard_set->PreShutdown();
  namespaces.Clear();
  shard_set->Shutdown();
  delete shard_set;

@ -81,7 +80,7 @@ void BlockingControllerTest::TearDown() {

TEST_F(BlockingControllerTest, Basic) {
  trans_->ScheduleSingleHop([&](Transaction* t, EngineShard* shard) {
    BlockingController bc(shard, &namespaces.GetDefaultNamespace());
    BlockingController bc(shard, &namespaces->GetDefaultNamespace());
    auto keys = t->GetShardArgs(shard->shard_id());
    bc.AddWatched(
        keys, [](auto...) { return true; }, t);

@ -107,7 +106,7 @@ TEST_F(BlockingControllerTest, Timeout) {
  unsigned num_watched = shard_set->Await(

      0, [&] {
        return namespaces.GetDefaultNamespace()
        return namespaces->GetDefaultNamespace()
            .GetBlockingController(EngineShard::tlocal()->shard_id())
            ->NumWatched(0);
      });

@ -477,7 +477,7 @@ void DeleteSlots(const SlotRanges& slots_ranges) {
    if (shard == nullptr)
      return;

    namespaces.GetDefaultNamespace().GetDbSlice(shard->shard_id()).FlushSlots(slots_ranges);
    namespaces->GetDefaultNamespace().GetDbSlice(shard->shard_id()).FlushSlots(slots_ranges);
  };
  shard_set->pool()->AwaitFiberOnAll(std::move(cb));
}

@ -633,7 +633,7 @@ void ClusterFamily::DflyClusterGetSlotInfo(CmdArgList args, SinkReplyBuilder* bu

    util::fb2::LockGuard lk(mu);
    for (auto& [slot, data] : slots_stats) {
      data += namespaces.GetDefaultNamespace().GetDbSlice(shard->shard_id()).GetSlotStats(slot);
      data += namespaces->GetDefaultNamespace().GetDbSlice(shard->shard_id()).GetSlotStats(slot);
    }
  };

@ -50,7 +50,7 @@ uint64_t GetKeyCount(const SlotRanges& slots) {
  uint64_t shard_keys = 0;
  for (const SlotRange& range : slots) {
    for (SlotId slot = range.start; slot <= range.end; slot++) {
      shard_keys += namespaces.GetDefaultNamespace()
      shard_keys += namespaces->GetDefaultNamespace()
                        .GetDbSlice(shard->shard_id())
                        .GetSlotStats(slot)
                        .key_count;

@ -101,7 +101,7 @@ OutgoingMigration::OutgoingMigration(MigrationInfo info, ClusterFamily* cf, Serv
      server_family_(sf),
      cf_(cf),
      tx_(new Transaction{sf->service().FindCmd("DFLYCLUSTER")}) {
  tx_->InitByArgs(&namespaces.GetDefaultNamespace(), 0, {});
  tx_->InitByArgs(&namespaces->GetDefaultNamespace(), 0, {});
}

OutgoingMigration::~OutgoingMigration() {

@ -218,7 +218,7 @@ void OutgoingMigration::SyncFb() {
    }

    OnAllShards([this](auto& migration) {
      DbSlice& db_slice = namespaces.GetDefaultNamespace().GetCurrentDbSlice();
      DbSlice& db_slice = namespaces->GetDefaultNamespace().GetCurrentDbSlice();
      server_family_->journal()->StartInThread();
      migration = std::make_unique<SliceSlotMigration>(
          &db_slice, server(), migration_info_.slot_ranges, server_family_->journal());

@ -291,8 +291,8 @@ bool OutgoingMigration::FinalizeMigration(long attempt) {
  bool is_block_active = true;
  auto is_pause_in_progress = [&is_block_active] { return is_block_active; };
  auto pause_fb_opt =
      Pause(server_family_->GetNonPriviligedListeners(), &namespaces.GetDefaultNamespace(), nullptr,
            ClientPause::WRITE, is_pause_in_progress);
      Pause(server_family_->GetNonPriviligedListeners(), &namespaces->GetDefaultNamespace(),
            nullptr, ClientPause::WRITE, is_pause_in_progress);

  if (!pause_fb_opt) {
    LOG(WARNING) << "Cluster migration finalization time out";

@ -18,6 +18,8 @@ extern "C" {
#include "base/flags.h"
#include "base/logging.h"
#include "core/compact_object.h"
#include "core/interpreter.h"
#include "server/conn_context.h"
#include "server/engine_shard_set.h"
#include "server/error.h"
#include "server/journal/journal.h"

@ -118,6 +120,7 @@ atomic_uint64_t rss_mem_peak(0);
unsigned kernel_version = 0;
size_t max_memory_limit = 0;
size_t serialization_max_chunk_size = 0;
Namespaces* namespaces = nullptr;

const char* GlobalStateName(GlobalState s) {
  switch (s) {

@ -453,4 +456,29 @@ void ThreadLocalMutex::unlock() {
  }
}

BorrowedInterpreter::BorrowedInterpreter(Transaction* tx, ConnectionState* state) {
  // Ensure squashing ignores EVAL. We can't run on a stub context, because it doesn't have our
  // preborrowed interpreter (which can't be shared between multiple threads).
  CHECK(!state->squashing_info);

  if (auto borrowed = state->exec_info.preborrowed_interpreter; borrowed) {
    // Ensure a preborrowed interpreter is only set for an already running MULTI transaction.
    CHECK_EQ(state->exec_info.state, ConnectionState::ExecInfo::EXEC_RUNNING);

    interpreter_ = borrowed;
  } else {
    // A scheduled transaction occupies a place in the transaction queue and holds locks,
    // preventing other transactions from progressing. Blocking below can deadlock!
    CHECK(!tx->IsScheduled());

    interpreter_ = ServerState::tlocal()->BorrowInterpreter();
    owned_ = true;
  }
}

BorrowedInterpreter::~BorrowedInterpreter() {
  if (owned_)
    ServerState::tlocal()->ReturnInterpreter(interpreter_);
}

}  // namespace dfly

@ -47,6 +47,9 @@ using RdbTypeFreqMap = absl::flat_hash_map<unsigned, size_t>;
class CommandId;
class Transaction;
class EngineShard;
class ConnectionState;
class Interpreter;
class Namespaces;

struct LockTagOptions {
  bool enabled = false;

@ -130,6 +133,8 @@ extern std::atomic_uint64_t rss_mem_peak;

extern size_t max_memory_limit;

extern Namespaces* namespaces;

// version 5.11 maps to 511 etc.
// set upon server start.
extern unsigned kernel_version;

@ -353,6 +358,29 @@ template <typename Mutex> class ABSL_SCOPED_LOCKABLE SharedLock {
  bool is_locked_;
};

// Ensures availability of an interpreter for EVAL-like commands and its automatic release.
// If it's part of a MULTI, the preborrowed interpreter is returned; otherwise a new one is
// acquired.
struct BorrowedInterpreter {
  BorrowedInterpreter(Transaction* tx, ConnectionState* state);

  ~BorrowedInterpreter();

  // Give up ownership of the interpreter; it must be returned manually.
  Interpreter* Release() && {
    DCHECK(owned_);
    owned_ = false;
    return interpreter_;
  }

  operator Interpreter*() {
    return interpreter_;
  }

 private:
  Interpreter* interpreter_ = nullptr;
  bool owned_ = false;
};

extern size_t serialization_max_chunk_size;

}  // namespace dfly
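A hypothetical call-site sketch for BorrowedInterpreter; RunScript and its surroundings are assumptions, only the RAII contract comes from the code above:

void RunScript(dfly::Transaction* tx, dfly::ConnectionState* state) {
  // Under an EXEC-running MULTI this picks up the preborrowed interpreter,
  // otherwise it borrows one from the thread-local pool.
  dfly::BorrowedInterpreter borrowed(tx, state);
  dfly::Interpreter* lua = borrowed;  // implicit conversion for call sites
  // ... evaluate the script through `lua` ...
}  // the destructor returns the interpreter only if it was borrowed here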
@ -72,6 +72,13 @@ size_t StoredCmd::NumArgs() const {
  return sizes_.size();
}

std::string StoredCmd::FirstArg() const {
  if (sizes_.size() == 0) {
    return {};
  }
  return buffer_.substr(0, sizes_[0]);
}
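FirstArg gives cheap access to the command token of a stored command. An illustrative, assumed use for logging queued MULTI commands:

#include "base/logging.h"

void LogQueued(const dfly::StoredCmd& cmd) {
  // FirstArg copies only the first token out of the packed buffer.
  LOG(INFO) << "queued " << cmd.FirstArg() << " (" << cmd.NumArgs() << " args)";
}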
facade::ReplyMode StoredCmd::ReplyMode() const {
  return reply_mode_;
}

@ -93,9 +100,8 @@ const CommandId* StoredCmd::Cid() const {
  return cid_;
}

ConnectionContext::ConnectionContext(::io::Sink* stream, facade::Connection* owner,
                                     acl::UserCredentials cred)
    : facade::ConnectionContext(stream, owner) {
ConnectionContext::ConnectionContext(facade::Connection* owner, acl::UserCredentials cred)
    : facade::ConnectionContext(owner) {
  if (owner) {
    skip_acl_validation = owner->IsPrivileged();
  }

@ -110,7 +116,7 @@ ConnectionContext::ConnectionContext(::io::Sink* stream, facade::Connection* own
}

ConnectionContext::ConnectionContext(const ConnectionContext* owner, Transaction* tx)
    : facade::ConnectionContext(nullptr, nullptr), transaction{tx} {
    : facade::ConnectionContext(nullptr), transaction{tx} {
  if (owner) {
    acl_commands = owner->acl_commands;
    keys = owner->keys;

@ -46,6 +46,8 @@ class StoredCmd {
    Fill(absl::MakeSpan(*dest));
  }

  std::string FirstArg() const;

  const CommandId* Cid() const;

  facade::ReplyMode ReplyMode() const;

@ -266,7 +268,7 @@ struct ConnectionState {

class ConnectionContext : public facade::ConnectionContext {
 public:
  ConnectionContext(::io::Sink* stream, facade::Connection* owner, dfly::acl::UserCredentials cred);
  ConnectionContext(facade::Connection* owner, dfly::acl::UserCredentials cred);
  ConnectionContext(const ConnectionContext* owner, Transaction* tx);

  struct DebugInfo {

@ -5,6 +5,7 @@

#include "base/flags.h"
#include "base/logging.h"
#include "core/qlist.h"
#include "core/sorted_map.h"
#include "core/string_map.h"
#include "core/string_set.h"

@ -152,24 +153,41 @@ quicklistEntry QLEntry() {
}

bool IterateList(const PrimeValue& pv, const IterateFunc& func, long start, long end) {
  quicklist* ql = static_cast<quicklist*>(pv.RObjPtr());
  long llen = quicklistCount(ql);
  if (end < 0 || end >= llen)
    end = llen - 1;

  quicklistIter* qiter = quicklistGetIteratorAtIdx(ql, AL_START_HEAD, start);
  quicklistEntry entry = QLEntry();
  long lrange = end - start + 1;

  bool success = true;
  while (success && quicklistNext(qiter, &entry) && lrange-- > 0) {
    if (entry.value) {
      success = func(ContainerEntry{reinterpret_cast<char*>(entry.value), entry.sz});
    } else {
      success = func(ContainerEntry{entry.longval});

  if (pv.Encoding() == OBJ_ENCODING_QUICKLIST) {
    quicklist* ql = static_cast<quicklist*>(pv.RObjPtr());
    long llen = quicklistCount(ql);
    if (end < 0 || end >= llen)
      end = llen - 1;

    quicklistIter* qiter = quicklistGetIteratorAtIdx(ql, AL_START_HEAD, start);
    quicklistEntry entry = QLEntry();
    long lrange = end - start + 1;

    while (success && quicklistNext(qiter, &entry) && lrange-- > 0) {
      if (entry.value) {
        success = func(ContainerEntry{reinterpret_cast<char*>(entry.value), entry.sz});
      } else {
        success = func(ContainerEntry{entry.longval});
      }
    }
    quicklistReleaseIterator(qiter);
    return success;
  }
  quicklistReleaseIterator(qiter);
  DCHECK_EQ(pv.Encoding(), kEncodingQL2);
  QList* ql = static_cast<QList*>(pv.RObjPtr());

  ql->Iterate(
      [&](const QList::Entry& entry) {
        if (entry.is_int()) {
          success = func(ContainerEntry{entry.ival()});
        } else {
          success = func(ContainerEntry{entry.view().data(), entry.view().size()});
        }
        return success;
      },
      start, end);
  return success;
}
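IterateList now dispatches on the value encoding: the legacy quicklist path drives an explicit iterator, while the QList path passes a callback whose false return stops iteration. A self-contained sketch of that shape with simplified container types:

#include <functional>
#include <list>
#include <string>
#include <variant>
#include <vector>

using Visitor = std::function<bool(const std::string&)>;
using AnyList = std::variant<std::vector<std::string>, std::list<std::string>>;

bool IterateAny(const AnyList& lst, const Visitor& visit) {
  bool success = true;
  std::visit([&](const auto& container) {
    for (const auto& item : container) {
      if (!(success = visit(item)))
        break;  // the callback asked to stop, like returning false to QList::Iterate
    }
  }, lst);
  return success;
}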
@ -842,9 +842,9 @@ void DebugCmd::Watched(facade::SinkReplyBuilder* builder) {
  shard_set->RunBlockingInParallel(cb);
  rb->StartArray(4);
  rb->SendBulkString("awaked");
  rb->SendStringArr(awaked_trans);
  rb->SendBulkStrArr(awaked_trans);
  rb->SendBulkString("watched");
  rb->SendStringArr(watched_keys);
  rb->SendBulkStrArr(watched_keys);
}

void DebugCmd::TxAnalysis(facade::SinkReplyBuilder* builder) {

@ -434,7 +434,7 @@ void SaveStagesController::CloseCb(unsigned index) {
  }

  if (auto* es = EngineShard::tlocal(); use_dfs_format_ && es)
    namespaces.GetDefaultNamespace().GetDbSlice(es->shard_id()).ResetUpdateEvents();
    namespaces->GetDefaultNamespace().GetDbSlice(es->shard_id()).ResetUpdateEvents();
}

void SaveStagesController::RunStage(void (SaveStagesController::*cb)(unsigned)) {

@ -77,7 +77,7 @@ bool WaitReplicaFlowToCatchup(absl::Time end_time, const DflyCmd::ReplicaInfo* r
                              EngineShard* shard) {
  // We don't want any writes to the journal after we send the `PING`,
  // and expirations could ruin that.
  namespaces.GetDefaultNamespace().GetDbSlice(shard->shard_id()).SetExpireAllowed(false);
  namespaces->GetDefaultNamespace().GetDbSlice(shard->shard_id()).SetExpireAllowed(false);
  shard->journal()->RecordEntry(0, journal::Op::PING, 0, 0, nullopt, {}, true);

  const FlowInfo* flow = &replica->flows[shard->shard_id()];

@ -455,7 +455,7 @@ void DflyCmd::TakeOver(CmdArgList args, RedisReplyBuilder* rb, ConnectionContext
  absl::Cleanup cleanup([] {
    VLOG(2) << "Enabling expiration";
    shard_set->RunBriefInParallel([](EngineShard* shard) {
      namespaces.GetDefaultNamespace().GetDbSlice(shard->shard_id()).SetExpireAllowed(true);
      namespaces->GetDefaultNamespace().GetDbSlice(shard->shard_id()).SetExpireAllowed(true);
    });
  });

@ -379,7 +379,7 @@ TEST_F(DflyEngineTest, MemcacheFlags) {
  ASSERT_EQ(Run("resp", {"flushdb"}), "OK");
  pp_->AwaitFiberOnAll([](auto*) {
    if (auto* shard = EngineShard::tlocal(); shard) {
      EXPECT_EQ(namespaces.GetDefaultNamespace()
      EXPECT_EQ(namespaces->GetDefaultNamespace()
                    .GetDbSlice(shard->shard_id())
                    .GetDBTable(0)
                    ->mcflag.size(),

@ -600,7 +600,7 @@ TEST_F(DflyEngineTest, Bug468) {

TEST_F(DflyEngineTest, Bug496) {
  shard_set->RunBlockingInParallel([](EngineShard* shard) {
    auto& db = namespaces.GetDefaultNamespace().GetDbSlice(shard->shard_id());
    auto& db = namespaces->GetDefaultNamespace().GetDbSlice(shard->shard_id());

    int cb_hits = 0;
    uint32_t cb_id =

@ -309,7 +309,7 @@ bool EngineShard::DoDefrag() {
  const float threshold = GetFlag(FLAGS_mem_defrag_page_utilization_threshold);

  // TODO: enable tiered storage on non-default db slice
  DbSlice& slice = namespaces.GetDefaultNamespace().GetDbSlice(shard_->shard_id());
  DbSlice& slice = namespaces->GetDefaultNamespace().GetDbSlice(shard_->shard_id());

  // If we moved to an invalid db, skip as long as it's not the last one
  while (!slice.IsDbValid(defrag_state_.dbid) && defrag_state_.dbid + 1 < slice.db_array_size())

@ -339,7 +339,7 @@ bool EngineShard::DoDefrag() {
      }
    });
    traverses_count++;
  } while (traverses_count < kMaxTraverses && cur && namespaces.IsInitialized());
  } while (traverses_count < kMaxTraverses && cur && namespaces);

  defrag_state_.UpdateScanState(cur.value());

@ -370,7 +370,7 @@ bool EngineShard::DoDefrag() {
// priority.
// otherwise lower the task priority so that it would not use the CPU when not required
uint32_t EngineShard::DefragTask() {
  if (!namespaces.IsInitialized()) {
  if (!namespaces) {
    return util::ProactorBase::kOnIdleMaxLevel;
  }

@ -392,7 +392,6 @@ EngineShard::EngineShard(util::ProactorBase* pb, mi_heap_t* heap)
      txq_([](const Transaction* t) { return t->txid(); }),
      mi_resource_(heap),
      shard_id_(pb->GetPoolIndex()) {
  defrag_task_ = pb->AddOnIdleTask([this]() { return DefragTask(); });
  queue_.Start(absl::StrCat("shard_queue_", shard_id()));
  queue2_.Start(absl::StrCat("l2_queue_", shard_id()));
}

@ -452,6 +451,7 @@ void EngineShard::StartPeriodicHeartbeatFiber(util::ProactorBase* pb) {
    ThisFiber::SetName(absl::StrCat("heartbeat_periodic", index));
    RunFPeriodically(heartbeat, period_ms, "heartbeat", &fiber_heartbeat_periodic_done_);
  });
  defrag_task_ = pb->AddOnIdleTask([this]() { return DefragTask(); });
}

void EngineShard::StartPeriodicShardHandlerFiber(util::ProactorBase* pb,

@ -492,7 +492,7 @@ void EngineShard::InitTieredStorage(ProactorBase* pb, size_t max_file_size) {
      << "Only ioring based backing storage is supported. Exiting...";

  // TODO: enable tiered storage on non-default namespace
  DbSlice& db_slice = namespaces.GetDefaultNamespace().GetDbSlice(shard_id());
  DbSlice& db_slice = namespaces->GetDefaultNamespace().GetDbSlice(shard_id());
  auto* shard = EngineShard::tlocal();
  shard->tiered_storage_ = make_unique<TieredStorage>(max_file_size, &db_slice);
  error_code ec = shard->tiered_storage_->Open(backing_prefix);

@ -657,7 +657,7 @@ void EngineShard::RemoveContTx(Transaction* tx) {

void EngineShard::Heartbeat() {
  DVLOG(2) << " Heartbeat";
  DCHECK(namespaces.IsInitialized());
  DCHECK(namespaces);

  CacheStats();

@ -666,7 +666,7 @@ void EngineShard::Heartbeat() {
  }

  // TODO: iterate over all namespaces
  DbSlice& db_slice = namespaces.GetDefaultNamespace().GetDbSlice(shard_id());
  DbSlice& db_slice = namespaces->GetDefaultNamespace().GetDbSlice(shard_id());

  // Offset CoolMemoryUsage when considering background offloading.
  // TODO: Another approach could be to align the approach similarly to how we do with

@ -692,7 +692,7 @@ void EngineShard::Heartbeat() {

void EngineShard::RetireExpiredAndEvict() {
  // TODO: iterate over all namespaces
  DbSlice& db_slice = namespaces.GetDefaultNamespace().GetDbSlice(shard_id());
  DbSlice& db_slice = namespaces->GetDefaultNamespace().GetDbSlice(shard_id());
  // Some of the functions below might acquire the same lock again, so we need to unlock it
  // asap. We won't yield before we relock the mutex again, so the code below is atomic
  // with respect to preemptions of big values. An example of that is the call to

@ -758,7 +758,7 @@ void EngineShard::CacheStats() {
  cache_stats_time_ = now;
  // Used memory for this shard.
  size_t used_mem = UsedMemory();
  DbSlice& db_slice = namespaces.GetDefaultNamespace().GetDbSlice(shard_id());
  DbSlice& db_slice = namespaces->GetDefaultNamespace().GetDbSlice(shard_id());

  // delta can wrap if used_memory is smaller than last_cached_used_memory_ and it's fine.
  size_t delta = used_mem - last_cached_used_memory_;

@ -808,7 +808,7 @@ EngineShard::TxQueueInfo EngineShard::AnalyzeTxQueue() const {
  info.tx_total = queue->size();
  unsigned max_db_id = 0;

  auto& db_slice = namespaces.GetDefaultNamespace().GetCurrentDbSlice();
  auto& db_slice = namespaces->GetDefaultNamespace().GetCurrentDbSlice();

  {
    auto value = queue->At(cur);

@ -103,7 +103,10 @@ EngineShardSet* shard_set = nullptr;

void EngineShardSet::Init(uint32_t sz, std::function<void()> shard_handler) {
  CHECK_EQ(0u, size());
  CHECK(namespaces == nullptr);

  shards_.reset(new EngineShard*[sz]);

  size_ = sz;
  size_t max_shard_file_size = GetTieredFileLimit(sz);
  pp_->AwaitFiberOnAll([this](uint32_t index, ProactorBase* pb) {

@ -112,7 +115,8 @@ void EngineShardSet::Init(uint32_t sz, std::function<void()> shard_handler) {
    }
  });

  namespaces.Init();
  // The order is important here. We must initialize namespaces after shards_.
  namespaces = new Namespaces();

  pp_->AwaitFiberOnAll([&](uint32_t index, ProactorBase* pb) {
    if (index < size_) {

@ -139,7 +143,13 @@ void EngineShardSet::PreShutdown() {
}

void EngineShardSet::Shutdown() {
  // Call Namespaces::Clear before destroying the engine shards, because it accesses them
  // internally.
  namespaces->Clear();
  RunBlockingInParallel([](EngineShard*) { EngineShard::DestroyThreadLocal(); });

  delete namespaces;
  namespaces = nullptr;
}
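Since `namespaces` is now a plain pointer created after the shards and destroyed before them, a null check replaces the old IsInitialized() call. A sketch of the guard pattern used by idle tasks, with the defrag body elided:

uint32_t DefragTaskSketch() {
  if (!dfly::namespaces)  // before Init() or after Shutdown(): nothing to defrag
    return util::ProactorBase::kOnIdleMaxLevel;
  // ... run one defrag step against namespaces->GetDefaultNamespace() ...
  return 0;
}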
|
||||
|
||||
void EngineShardSet::InitThreadLocal(ProactorBase* pb) {
|
||||
|
@ -150,7 +160,7 @@ void EngineShardSet::InitThreadLocal(ProactorBase* pb) {
|
|||
|
||||
void EngineShardSet::TEST_EnableCacheMode() {
|
||||
RunBlockingInParallel([](EngineShard* shard) {
|
||||
namespaces.GetDefaultNamespace().GetCurrentDbSlice().TEST_EnableCacheMode();
|
||||
namespaces->GetDefaultNamespace().GetCurrentDbSlice().TEST_EnableCacheMode();
|
||||
});
|
||||
}
|
||||
|
||||
|
|
|
@ -978,7 +978,7 @@ void GenericFamily::Del(CmdArgList args, Transaction* tx, SinkReplyBuilder* buil
|
|||
VLOG(1) << "Del " << ArgS(args, 0);
|
||||
|
||||
atomic_uint32_t result{0};
|
||||
bool is_mc = (builder->type() == SinkReplyBuilder::MC);
|
||||
bool is_mc = (builder->GetProtocol() == Protocol::MEMCACHE);
|
||||
|
||||
auto cb = [&result](const Transaction* t, EngineShard* shard) {
|
||||
ShardArgs args = t->GetShardArgs(shard->shard_id());
|
||||
|
@ -1025,7 +1025,7 @@ void GenericFamily::Ping(CmdArgList args, Transaction* tx, SinkReplyBuilder* bui
|
|||
}
|
||||
|
||||
string_view resp[2] = {"pong", msg};
|
||||
return rb->SendStringArr(resp);
|
||||
return rb->SendBulkStrArr(resp);
|
||||
}
|
||||
|
||||
if (args.size() == 0) {
|
||||
|
|
|
@ -720,8 +720,8 @@ void HGetGeneric(CmdArgList args, uint8_t getall_mask, Transaction* tx, SinkRepl
|
|||
auto* rb = static_cast<RedisReplyBuilder*>(builder);
|
||||
if (result) {
|
||||
bool is_map = (getall_mask == (VALUES | FIELDS));
|
||||
rb->SendStringArr(absl::Span<const string>{*result},
|
||||
is_map ? RedisReplyBuilder::MAP : RedisReplyBuilder::ARRAY);
|
||||
rb->SendBulkStrArr(absl::Span<const string>{*result},
|
||||
is_map ? RedisReplyBuilder::MAP : RedisReplyBuilder::ARRAY);
|
||||
} else {
|
||||
builder->SendError(result.status());
|
||||
}
|
||||
|
@ -1220,7 +1220,7 @@ void HSetFamily::HRandField(CmdArgList args, Transaction* tx, SinkReplyBuilder*
|
|||
if ((result->size() == 1) && (args.size() == 1))
|
||||
rb->SendBulkString(result->front());
|
||||
else
|
||||
rb->SendStringArr(*result, facade::RedisReplyBuilder::ARRAY);
|
||||
rb->SendBulkStrArr(*result, facade::RedisReplyBuilder::ARRAY);
|
||||
} else if (result.status() == OpStatus::KEY_NOTFOUND) {
|
||||
if (args.size() == 1)
|
||||
rb->SendNull();
|
||||
|
|
|
@@ -131,16 +131,6 @@ struct CaptureVisitor {
     absl::StrAppend(&str, "\"", facade::StatusToMsg(status), "\"");
   }
 
-  void operator()(const CapturingReplyBuilder::StrArrPayload& sa) {
-    absl::StrAppend(&str, "[");
-    for (const auto& val : sa.arr) {
-      absl::StrAppend(&str, JsonEscape(val), ",");
-    }
-    if (sa.arr.size())
-      str.pop_back();
-    absl::StrAppend(&str, "]");
-  }
-
   void operator()(unique_ptr<CapturingReplyBuilder::CollectionPayload> cp) {
     if (!cp) {
       absl::StrAppend(&str, "null");

@@ -157,32 +147,6 @@ struct CaptureVisitor {
     }
   }
 
-  void operator()(facade::SinkReplyBuilder::MGetResponse resp) {
-    absl::StrAppend(&str, "[");
-    for (const auto& val : resp.resp_arr) {
-      if (val) {
-        absl::StrAppend(&str, JsonEscape(val->value), ",");
-      } else {
-        absl::StrAppend(&str, "null,");
-      }
-    }
-
-    if (resp.resp_arr.size())
-      str.pop_back();
-    absl::StrAppend(&str, "]");
-  }
-
-  void operator()(const CapturingReplyBuilder::ScoredArray& sarr) {
-    absl::StrAppend(&str, "[");
-    for (const auto& [key, score] : sarr.arr) {
-      absl::StrAppend(&str, "{", JsonEscape(key), ":", score, "},");
-    }
-    if (sarr.arr.size() > 0) {
-      str.pop_back();
-    }
-    absl::StrAppend(&str, "]");
-  }
-
   string str;
 };
@@ -42,7 +42,7 @@ JournalExecutor::JournalExecutor(Service* service)
   conn_context_.is_replicating = true;
   conn_context_.journal_emulated = true;
   conn_context_.skip_acl_validation = true;
-  conn_context_.ns = &namespaces.GetDefaultNamespace();
+  conn_context_.ns = &namespaces->GetDefaultNamespace();
 }
 
 JournalExecutor::~JournalExecutor() {

@@ -204,7 +204,7 @@ template <typename I> void Send(I begin, I end, RedisReplyBuilder* rb) {
     rb->SendEmptyArray();
   } else {
     if constexpr (is_same_v<decltype(*begin), const string>) {
-      rb->SendStringArr(facade::OwnedArgSlice{begin, end});
+      rb->SendBulkStrArr(facade::OwnedArgSlice{begin, end});
     } else {
       rb->StartArray(end - begin);
       for (auto i = begin; i != end; ++i) {
@@ -84,6 +84,10 @@ void* listPopSaver(unsigned char* data, size_t sz) {
   return new string((char*)data, sz);
 }
 
+QList::Where ToWhere(ListDir dir) {
+  return dir == ListDir::LEFT ? QList::HEAD : QList::TAIL;
+}
+
 enum InsertParam { INSERT_BEFORE, INSERT_AFTER };
 
 string ListPop(ListDir dir, quicklist* ql) {

@@ -156,13 +160,25 @@ std::string OpBPop(Transaction* t, EngineShard* shard, std::string_view key, ListDir dir) {
   CHECK(it_res) << t->DebugId() << " " << key;  // must exist and must be ok.
 
   auto it = it_res->it;
-  quicklist* ql = GetQL(it->second);
+  std::string value;
+  size_t len;
+
+  if (it->second.Encoding() == OBJ_ENCODING_QUICKLIST) {
+    quicklist* ql = GetQL(it->second);
+
+    value = ListPop(dir, ql);
+    len = quicklistCount(ql);
+  } else {
+    QList* ql = GetQLV2(it->second);
+    QList::Where where = ToWhere(dir);
+    value = ql->Pop(where);
+    len = ql->Size();
+  }
 
-  std::string value = ListPop(dir, ql);
   it_res->post_updater.Run();
 
   OpArgs op_args = t->GetOpArgs(shard);
-  if (quicklistCount(ql) == 0) {
+  if (len == 0) {
     DVLOG(1) << "deleting key " << key << " " << t->DebugId();
     CHECK(op_args.GetDbSlice().Del(op_args.db_cntx, it));
   }
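The list hunks that follow all repeat the shape shown in OpBPop: branch once on the value's encoding, operate through the matching representation, then continue with encoding-neutral locals (`value`, `len`). A condensed illustration of that dispatch with stand-in types (the real QList/quicklist APIs differ):

    #include <deque>
    #include <string>

    // Stand-ins for the legacy quicklist and the experimental QList (v2).
    struct LegacyList {
      std::deque<std::string> items;
      std::string Pop() { std::string s = items.front(); items.pop_front(); return s; }
      size_t Count() const { return items.size(); }
    };
    struct ListV2 {
      std::deque<std::string> items;
      std::string Pop() { std::string s = items.front(); items.pop_front(); return s; }
      size_t Size() const { return items.size(); }
    };

    enum class Encoding { kLegacy, kV2 };
    struct Value { Encoding enc; void* ptr; };

    // Dispatch once on the encoding; everything afterwards uses neutral results.
    std::string PopFront(Value* v, bool* now_empty) {
      std::string out;
      size_t len = 0;
      if (v->enc == Encoding::kLegacy) {
        auto* ql = static_cast<LegacyList*>(v->ptr);
        out = ql->Pop();
        len = ql->Count();
      } else {
        auto* ql = static_cast<ListV2*>(v->ptr);
        out = ql->Pop();
        len = ql->Size();
      }
      *now_empty = (len == 0);  // the caller deletes the key, as OpBPop does above
      return out;
    }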
@@ -183,19 +199,37 @@ OpResult<string> OpMoveSingleShard(const OpArgs& op_args, string_view src, string_view dest,
     return src_res.status();
 
   auto src_it = src_res->it;
-  quicklist* src_ql = GetQL(src_it->second);
+  quicklist* src_ql = nullptr;
+  QList* srcql_v2 = nullptr;
+  quicklist* dest_ql = nullptr;
+  QList* destql_v2 = nullptr;
+  string val;
+  size_t prev_len = 0;
+
+  if (src_it->second.Encoding() == OBJ_ENCODING_QUICKLIST) {
+    src_ql = GetQL(src_it->second);
+    prev_len = quicklistCount(src_ql);
+  } else {
+    DCHECK_EQ(src_it->second.Encoding(), kEncodingQL2);
+    srcql_v2 = GetQLV2(src_it->second);
+    prev_len = srcql_v2->Size();
+  }
 
   if (src == dest) {  // simple case.
-    string val = ListPop(src_dir, src_ql);
-
-    int pos = (dest_dir == ListDir::LEFT) ? QUICKLIST_HEAD : QUICKLIST_TAIL;
-    quicklistPush(src_ql, val.data(), val.size(), pos);
+    if (src_ql) {
+      val = ListPop(src_dir, src_ql);
+      int pos = (dest_dir == ListDir::LEFT) ? QUICKLIST_HEAD : QUICKLIST_TAIL;
+      quicklistPush(src_ql, val.data(), val.size(), pos);
+    } else {
+      val = srcql_v2->Pop(ToWhere(src_dir));
+      srcql_v2->Push(val, ToWhere(dest_dir));
+    }
 
     return val;
   }
 
-  quicklist* dest_ql = nullptr;
   src_res->post_updater.Run();
 
   auto op_res = db_slice.AddOrFind(op_args.db_cntx, dest);
   RETURN_ON_BAD_STATUS(op_res);
   auto& dest_res = *op_res;
@@ -205,26 +239,43 @@ OpResult<string> OpMoveSingleShard(const OpArgs& op_args, string_view src, string_view dest,
   src_it = src_res->it;
+  DCHECK(IsValid(src_it));
 
   if (dest_res.is_new) {
-    dest_ql = quicklistCreate();
-    quicklistSetOptions(dest_ql, GetFlag(FLAGS_list_max_listpack_size),
-                        GetFlag(FLAGS_list_compress_depth));
-    dest_res.it->second.InitRobj(OBJ_LIST, OBJ_ENCODING_QUICKLIST, dest_ql);
+    if (absl::GetFlag(FLAGS_list_experimental_v2)) {
+      destql_v2 = CompactObj::AllocateMR<QList>(GetFlag(FLAGS_list_max_listpack_size),
+                                                GetFlag(FLAGS_list_compress_depth));
+      dest_res.it->second.InitRobj(OBJ_LIST, kEncodingQL2, destql_v2);
+    } else {
+      dest_ql = quicklistCreate();
+      quicklistSetOptions(dest_ql, GetFlag(FLAGS_list_max_listpack_size),
+                          GetFlag(FLAGS_list_compress_depth));
+      dest_res.it->second.InitRobj(OBJ_LIST, OBJ_ENCODING_QUICKLIST, dest_ql);
+    }
   } else {
     if (dest_res.it->second.ObjType() != OBJ_LIST)
       return OpStatus::WRONG_TYPE;
 
-    dest_ql = GetQL(dest_res.it->second);
+    if (dest_res.it->second.Encoding() == kEncodingQL2) {
+      destql_v2 = GetQLV2(dest_res.it->second);
+    } else {
+      DCHECK_EQ(dest_res.it->second.Encoding(), OBJ_ENCODING_QUICKLIST);
+      dest_ql = GetQL(dest_res.it->second);
+    }
   }
 
-  string val = ListPop(src_dir, src_ql);
-  int pos = (dest_dir == ListDir::LEFT) ? QUICKLIST_HEAD : QUICKLIST_TAIL;
-  quicklistPush(dest_ql, val.data(), val.size(), pos);
+  if (src_ql) {
+    DCHECK(dest_ql);
+    val = ListPop(src_dir, src_ql);
+    int pos = (dest_dir == ListDir::LEFT) ? QUICKLIST_HEAD : QUICKLIST_TAIL;
+    quicklistPush(dest_ql, val.data(), val.size(), pos);
+  } else {
+    DCHECK(srcql_v2);
+    DCHECK(destql_v2);
+    val = srcql_v2->Pop(ToWhere(src_dir));
+    destql_v2->Push(val, ToWhere(dest_dir));
+  }
 
   src_res->post_updater.Run();
   dest_res.post_updater.Run();
 
-  if (quicklistCount(src_ql) == 0) {
+  if (prev_len == 1) {
     CHECK(db_slice.Del(op_args.db_cntx, src_it));
   }
@@ -242,17 +293,28 @@ OpResult<string> Peek(const OpArgs& op_args, string_view key, ListDir dir, bool fetch) {
   if (!fetch)
     return OpStatus::OK;
 
-  quicklist* ql = GetQL(it_res.value()->second);
-  quicklistEntry entry = container_utils::QLEntry();
-  quicklistIter* iter = (dir == ListDir::LEFT) ? quicklistGetIterator(ql, AL_START_HEAD)
-                                               : quicklistGetIterator(ql, AL_START_TAIL);
-  CHECK(quicklistNext(iter, &entry));
-  quicklistReleaseIterator(iter);
+  const PrimeValue& pv = it_res.value()->second;
+  DCHECK_GT(pv.Size(), 0u);  // should be not-empty.
 
-  if (entry.value)
-    return string(reinterpret_cast<char*>(entry.value), entry.sz);
-  else
-    return absl::StrCat(entry.longval);
+  if (pv.Encoding() == OBJ_ENCODING_QUICKLIST) {
+    quicklist* ql = GetQL(it_res.value()->second);
+    quicklistEntry entry = container_utils::QLEntry();
+    quicklistIter* iter =
+        quicklistGetIterator(ql, (dir == ListDir::LEFT) ? AL_START_HEAD : AL_START_TAIL);
+
+    CHECK(quicklistNext(iter, &entry));
+    quicklistReleaseIterator(iter);
+
+    return (entry.value) ? string(reinterpret_cast<char*>(entry.value), entry.sz)
+                         : absl::StrCat(entry.longval);
+  }
+
+  DCHECK_EQ(pv.Encoding(), kEncodingQL2);
+  QList* ql = GetQLV2(pv);
+  auto it = ql->GetIterator(ToWhere(dir));
+  CHECK(it.Next());
+
+  return it.Get().to_string();
 }
 
 OpResult<uint32_t> OpPush(const OpArgs& op_args, std::string_view key, ListDir dir,
@@ -307,7 +369,7 @@ OpResult<uint32_t> OpPush(const OpArgs& op_args, std::string_view key, ListDir dir,
     }
     len = quicklistCount(ql);
   } else {
-    QList::Where where = (dir == ListDir::LEFT) ? QList::HEAD : QList::TAIL;
+    QList::Where where = ToWhere(dir);
     for (string_view v : vals) {
       ql_v2->Push(v, where);
     }

@@ -346,24 +408,47 @@ OpResult<StringVec> OpPop(const OpArgs& op_args, string_view key, ListDir dir, uint32_t count,
     return StringVec{};
 
   auto it = it_res->it;
-  quicklist* ql = GetQL(it->second);
-  auto prev_len = quicklistCount(ql);
+  size_t prev_len = 0;
   StringVec res;
-  if (prev_len < count) {
-    count = prev_len;
-  }
+  if (it->second.Encoding() == kEncodingQL2) {
+    QList* ql = GetQLV2(it->second);
+    prev_len = ql->Size();
 
-  if (return_results) {
-    res.reserve(count);
-  }
+    if (prev_len < count) {
+      count = prev_len;
+    }
 
-  for (unsigned i = 0; i < count; ++i) {
-    string val = ListPop(dir, ql);
     if (return_results) {
-      res.push_back(std::move(val));
+      res.reserve(count);
+    }
+
+    QList::Where where = ToWhere(dir);
+    for (unsigned i = 0; i < count; ++i) {
+      string val = ql->Pop(where);
+      if (return_results) {
+        res.push_back(std::move(val));
+      }
+    }
+  } else {
+    quicklist* ql = GetQL(it->second);
+    prev_len = quicklistCount(ql);
+
+    if (prev_len < count) {
+      count = prev_len;
+    }
+
+    if (return_results) {
+      res.reserve(count);
+    }
+
+    for (unsigned i = 0; i < count; ++i) {
+      string val = ListPop(dir, ql);
+      if (return_results) {
+        res.push_back(std::move(val));
+      }
     }
   }
 
   it_res->post_updater.Run();
 
   if (count == prev_len) {
@@ -572,46 +657,74 @@ OpResult<uint32_t> OpRem(const OpArgs& op_args, string_view key, string_view elem, long count) {
     return it_res.status();
 
   auto it = it_res->it;
-  quicklist* ql = GetQL(it->second);
-
-  int iter_direction = AL_START_HEAD;
-  long long index = 0;
-  if (count < 0) {
-    count = -count;
-    iter_direction = AL_START_TAIL;
-    index = -1;
-  }
-
-  quicklistIter qiter;
-  quicklistInitIterator(&qiter, ql, iter_direction, index);
-  quicklistEntry entry;
+  size_t len = 0;
   unsigned removed = 0;
 
   int64_t ival;
 
   // try parsing the element into an integer.
   int is_int = lpStringToInt64(elem.data(), elem.size(), &ival);
 
-  auto is_match = [&](const quicklistEntry& entry) {
-    if (is_int != (entry.value == nullptr))
-      return false;
+  if (it->second.Encoding() == kEncodingQL2) {
+    QList* ql = GetQLV2(it->second);
+    QList::Where where = QList::HEAD;
 
-    return is_int ? entry.longval == ival : ElemCompare(entry, elem);
-  };
-
-  while (quicklistNext(&qiter, &entry)) {
-    if (is_match(entry)) {
-      quicklistDelEntry(&qiter, &entry);
-      removed++;
-      if (count && removed == count)
-        break;
+    if (count < 0) {
+      count = -count;
+      where = QList::TAIL;
     }
-  }
 
+    auto it = ql->GetIterator(where);
+    auto is_match = [&](const QList::Entry& entry) {
+      return is_int ? entry.is_int() && entry.ival() == ival : entry == elem;
+    };
+
+    while (it.Next()) {
+      QList::Entry entry = it.Get();
+      if (is_match(entry)) {
+        it = ql->Erase(it);
+        removed++;
+        if (count && removed == count)
+          break;
+      }
+    }
+    len = ql->Size();
+  } else {
+    quicklist* ql = GetQL(it->second);
+
+    int iter_direction = AL_START_HEAD;
+    long long index = 0;
+    if (count < 0) {
+      count = -count;
+      iter_direction = AL_START_TAIL;
+      index = -1;
+    }
+
+    quicklistIter qiter;
+    quicklistInitIterator(&qiter, ql, iter_direction, index);
+    quicklistEntry entry;
+
+    auto is_match = [&](const quicklistEntry& entry) {
+      if (is_int != (entry.value == nullptr))
+        return false;
+
+      return is_int ? entry.longval == ival : ElemCompare(entry, elem);
+    };
+
+    while (quicklistNext(&qiter, &entry)) {
+      if (is_match(entry)) {
+        quicklistDelEntry(&qiter, &entry);
+        removed++;
+        if (count && removed == count)
+          break;
+      }
+    }
+    quicklistCompressIterator(&qiter);
+    len = quicklistCount(ql);
+  }
   it_res->post_updater.Run();
 
-  quicklistCompressIterator(&qiter);
-
-  if (quicklistCount(ql) == 0) {
+  if (len == 0) {
     CHECK(db_slice.Del(op_args.db_cntx, it));
   }
@@ -642,8 +755,8 @@ OpStatus OpTrim(const OpArgs& op_args, string_view key, long start, long end) {
     return it_res.status();
 
   auto it = it_res->it;
-  quicklist* ql = GetQL(it->second);
-  long llen = quicklistCount(ql);
+
+  long llen = it->second.Size();
 
   /* convert negative indexes */
   if (start < 0)

@@ -668,12 +781,18 @@ OpStatus OpTrim(const OpArgs& op_args, string_view key, long start, long end) {
     rtrim = llen - end - 1;
   }
 
-  quicklistDelRange(ql, 0, ltrim);
-  quicklistDelRange(ql, -rtrim, rtrim);
+  if (it->second.Encoding() == kEncodingQL2) {
+    QList* ql = GetQLV2(it->second);
+    ql->Erase(0, ltrim);
+    ql->Erase(-rtrim, rtrim);
+  } else {
+    quicklist* ql = GetQL(it->second);
+    quicklistDelRange(ql, 0, ltrim);
+    quicklistDelRange(ql, -rtrim, rtrim);
+  }
   it_res->post_updater.Run();
 
-  if (quicklistCount(ql) == 0) {
+  if (it->second.Size() == 0) {
     CHECK(db_slice.Del(op_args.db_cntx, it));
   }
   return OpStatus::OK;
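To make the trim arithmetic above concrete: with llen = 5 and LTRIM mylist 1 -2, end = -2 normalizes to index 3, so ltrim = 1 element from the head and rtrim = 5 - 3 - 1 = 1 from the tail. A small standalone sketch of the same normalization (mirroring Redis semantics, not the function above verbatim):

    #include <cstdio>

    // Convert LTRIM-style start/end indexes into head/tail deletion counts.
    void TrimCounts(long llen, long start, long end, long* ltrim, long* rtrim) {
      if (start < 0) start += llen;  // negative indexes count from the tail
      if (end < 0) end += llen;
      if (start < 0) start = 0;

      if (start > end || start >= llen) {  // empty range: drop everything
        *ltrim = llen;
        *rtrim = 0;
      } else {
        if (end >= llen) end = llen - 1;
        *ltrim = start;
        *rtrim = llen - end - 1;
      }
    }

    int main() {
      long lt = 0, rt = 0;
      TrimCounts(5, 1, -2, &lt, &rt);
      std::printf("ltrim=%ld rtrim=%ld\n", lt, rt);  // ltrim=1 rtrim=1
    }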
@@ -987,7 +1106,7 @@ void BPopGeneric(ListDir dir, CmdArgList args, Transaction* tx, SinkReplyBuilder* builder,
   if (popped_key) {
     DVLOG(1) << "BPop " << tx->DebugId() << " popped from key " << popped_key;  // key.
     std::string_view str_arr[2] = {*popped_key, popped_value};
-    return rb->SendStringArr(str_arr);
+    return rb->SendBulkStrArr(str_arr);
   }
 
   DVLOG(1) << "result for " << tx->DebugId() << " is " << popped_key.status();

@@ -1202,7 +1321,7 @@ void ListFamily::LRange(CmdArgList args, Transaction* tx, SinkReplyBuilder* builder) {
   }
 
   auto* rb = static_cast<RedisReplyBuilder*>(builder);
-  rb->SendStringArr(*res);
+  rb->SendBulkStrArr(*res);
 }
 
 // lrem key 5 foo, will remove foo elements from the list if exists at most 5 times.
@@ -33,7 +33,7 @@ class ListFamilyTest : public BaseFamilyTest {
   static unsigned NumWatched() {
     atomic_uint32_t sum{0};
 
-    auto ns = &namespaces.GetDefaultNamespace();
+    auto ns = &namespaces->GetDefaultNamespace();
     shard_set->RunBriefInParallel([&](EngineShard* es) {
       auto* bc = ns->GetBlockingController(es->shard_id());
       if (bc)

@@ -45,7 +45,7 @@ class ListFamilyTest : public BaseFamilyTest {
 
   static bool HasAwakened() {
     atomic_uint32_t sum{0};
-    auto ns = &namespaces.GetDefaultNamespace();
+    auto ns = &namespaces->GetDefaultNamespace();
     shard_set->RunBriefInParallel([&](EngineShard* es) {
       auto* bc = ns->GetBlockingController(es->shard_id());
       if (bc)

@@ -533,6 +533,7 @@ TEST_F(ListFamilyTest, LMove) {
 
   resp = Run({"lmove", kKey1, kKey2, "LEFT", "RIGHT"});
   ASSERT_THAT(resp, "1");
+  ASSERT_THAT(Run({"llen", kKey1}), IntArg(4));
 
   resp = Run({"lmove", kKey1, kKey2, "LEFT", "LEFT"});
   ASSERT_THAT(resp, "2");
@@ -286,24 +286,16 @@ class InterpreterReplier : public RedisReplyBuilder {
   }
 
   void SendError(std::string_view str, std::string_view type = std::string_view{}) final;
-  void SendStored() final;
 
-  void SendBulkString(std::string_view str) final;
   void SendSimpleString(std::string_view str) final;
-  void SendMGetResponse(MGetResponse resp) final;
-  void SendSimpleStrArr(StrSpan arr) final;
-
-  void SendNullArray() final;
-
-  void SendStringArr(StrSpan arr, CollectionType type) final;
   void SendNull() final;
 
   void SendLong(long val) final;
   void SendDouble(double val) final;
 
+  void SendBulkString(std::string_view str) final;
+
   void StartCollection(unsigned len, CollectionType type) final;
-  void SendScoredArray(absl::Span<const std::pair<std::string, double>> arr,
-                       bool with_scores) final;
 
  private:
   void PostItem();

@@ -398,11 +390,6 @@ void InterpreterReplier::SendError(string_view str, std::string_view type) {
   explr_->OnError(str);
 }
 
-void InterpreterReplier::SendStored() {
-  DCHECK(array_len_.empty());
-  SendSimpleString("OK");
-}
-
 void InterpreterReplier::SendSimpleString(string_view str) {
   if (array_len_.empty())
     explr_->OnStatus(str);

@@ -411,40 +398,11 @@ void InterpreterReplier::SendSimpleString(string_view str) {
   PostItem();
 }
 
-void InterpreterReplier::SendMGetResponse(MGetResponse resp) {
-  DCHECK(array_len_.empty());
-
-  explr_->OnArrayStart(resp.resp_arr.size());
-  for (uint32_t i = 0; i < resp.resp_arr.size(); ++i) {
-    if (resp.resp_arr[i].has_value()) {
-      explr_->OnString(resp.resp_arr[i]->value);
-    } else {
-      explr_->OnNil();
-    }
-  }
-  explr_->OnArrayEnd();
-}
-
-void InterpreterReplier::SendSimpleStrArr(StrSpan arr) {
-  explr_->OnArrayStart(arr.Size());
-  for (string_view str : arr)
-    explr_->OnString(str);
-  explr_->OnArrayEnd();
-}
-
-void InterpreterReplier::SendNullArray() {
-  SendSimpleStrArr(ArgSlice{});
-  PostItem();
-}
-
-void InterpreterReplier::SendStringArr(StrSpan arr, CollectionType) {
-  explr_->OnArrayStart(arr.Size());
-  for (string_view str : arr)
-    explr_->OnString(str);
-  explr_->OnArrayEnd();
-  PostItem();
-}
-
 void InterpreterReplier::SendNull() {
   explr_->OnNil();
   PostItem();

@@ -465,7 +423,9 @@ void InterpreterReplier::SendBulkString(string_view str) {
   PostItem();
 }
 
-void InterpreterReplier::StartCollection(unsigned len, CollectionType) {
+void InterpreterReplier::StartCollection(unsigned len, CollectionType type) {
+  if (type == MAP)
+    len *= 2;
   explr_->OnArrayStart(len);
 
   if (len == 0) {

@@ -477,31 +437,6 @@ void InterpreterReplier::StartCollection(unsigned len, CollectionType type) {
   }
 }
 
-void InterpreterReplier::SendScoredArray(absl::Span<const std::pair<std::string, double>> arr,
-                                         bool with_scores) {
-  if (with_scores) {
-    if (IsResp3()) {
-      StartCollection(arr.size(), CollectionType::ARRAY);
-      for (size_t i = 0; i < arr.size(); ++i) {
-        StartArray(2);
-        SendBulkString(arr[i].first);
-        SendDouble(arr[i].second);
-      }
-    } else {
-      StartCollection(arr.size() * 2, CollectionType::ARRAY);
-      for (size_t i = 0; i < arr.size(); ++i) {
-        SendBulkString(arr[i].first);
-        SendDouble(arr[i].second);
-      }
-    }
-  } else {
-    StartCollection(arr.size(), CollectionType::ARRAY);
-    for (size_t i = 0; i < arr.size(); ++i) {
-      SendBulkString(arr[i].first);
-    }
-  }
-}
-
 bool IsSHA(string_view str) {
   for (auto c : str) {
     if (!absl::ascii_isxdigit(c))
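The StartCollection fix above matters because a RESP map of len entries carries 2*len payload items (key, value, key, value, ...), while the interpreter-side explorer walks one flat array. A toy version of the correction, with invented names:

    #include <cstddef>
    #include <string>
    #include <utility>
    #include <vector>

    enum class CollectionType { ARRAY, MAP };

    // The explorer sees a flat sequence, so a MAP must announce 2*len slots.
    std::vector<std::string> Flatten(
        CollectionType type, const std::vector<std::pair<std::string, std::string>>& kv) {
      size_t len = kv.size();
      if (type == CollectionType::MAP)
        len *= 2;  // the same correction StartCollection applies before OnArrayStart

      std::vector<std::string> flat;
      flat.reserve(len);
      for (const auto& [k, v] : kv) {
        flat.push_back(k);
        flat.push_back(v);
      }
      return flat;  // a 2-entry map flattens to 4 items
    }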
@@ -533,7 +468,7 @@ void Topkeys(const http::QueryArgs& args, HttpContext* send) {
 
   shard_set->RunBriefInParallel([&](EngineShard* shard) {
     for (const auto& db :
-         namespaces.GetDefaultNamespace().GetDbSlice(shard->shard_id()).databases()) {
+         namespaces->GetDefaultNamespace().GetDbSlice(shard->shard_id()).databases()) {
       if (db->top_keys.IsEnabled()) {
         is_enabled = true;
         for (const auto& [key, count] : db->top_keys.GetTopKeys()) {
@@ -684,32 +619,33 @@ void ClusterHtmlPage(const http::QueryArgs& args, HttpContext* send,
   send->Invoke(std::move(resp));
 }
 
-enum class ExecEvalState {
+enum class ExecScriptUse {
   NONE = 0,
-  ALL = 1,
-  SOME = 2,
+  SCRIPT_LOAD = 1,
+  SCRIPT_RUN = 2,
 };
 
-ExecEvalState DetermineEvalPresense(const std::vector<StoredCmd>& body) {
-  unsigned eval_cnt = 0;
+ExecScriptUse DetermineScriptPresense(const std::vector<StoredCmd>& body) {
+  bool script_load = false;
   for (const auto& scmd : body) {
     if (CO::IsEvalKind(scmd.Cid()->name())) {
-      eval_cnt++;
+      return ExecScriptUse::SCRIPT_RUN;
     }
+
+    if ((scmd.Cid()->name() == "SCRIPT") && (absl::AsciiStrToUpper(scmd.FirstArg()) == "LOAD")) {
+      script_load = true;
+    }
   }
 
-  if (eval_cnt == 0)
-    return ExecEvalState::NONE;
+  if (script_load)
+    return ExecScriptUse::SCRIPT_LOAD;
 
-  if (eval_cnt == body.size())
-    return ExecEvalState::ALL;
-
-  return ExecEvalState::SOME;
+  return ExecScriptUse::NONE;
 }
 
 // Returns the multi mode for that transaction. Returns NOT_DETERMINED if no scheduling
 // is required.
-Transaction::MultiMode DeduceExecMode(ExecEvalState state,
+Transaction::MultiMode DeduceExecMode(ExecScriptUse state,
                                       const ConnectionState::ExecInfo& exec_info,
                                       const ScriptMgr& script_mgr) {
   // Check if script most LIKELY has global eval transactions

@@ -717,7 +653,7 @@ Transaction::MultiMode DeduceExecMode(ExecScriptUse state,
   Transaction::MultiMode multi_mode =
       static_cast<Transaction::MultiMode>(absl::GetFlag(FLAGS_multi_exec_mode));
 
-  if (state != ExecEvalState::NONE) {
+  if (state == ExecScriptUse::SCRIPT_RUN) {
     contains_global = script_mgr.AreGlobalByDefault();
   }
 
@@ -765,50 +701,6 @@ string CreateExecDescriptor(const std::vector<StoredCmd>& stored_cmds, unsigned
   return result;
 }
 
-// Ensures availability of an interpreter for EVAL-like commands and it's automatic release.
-// If it's part of MULTI, the preborrowed interpreter is returned, otherwise a new is acquired.
-struct BorrowedInterpreter {
-  BorrowedInterpreter(Transaction* tx, ConnectionContext* cntx) {
-    // Ensure squashing ignores EVAL. We can't run on a stub context, because it doesn't have our
-    // preborrowed interpreter (which can't be shared on multiple threads).
-    CHECK(!cntx->conn_state.squashing_info);
-
-    if (auto borrowed = cntx->conn_state.exec_info.preborrowed_interpreter; borrowed) {
-      // Ensure a preborrowed interpreter is only set for an already running MULTI transaction.
-      CHECK_EQ(cntx->conn_state.exec_info.state, ConnectionState::ExecInfo::EXEC_RUNNING);
-
-      interpreter_ = borrowed;
-    } else {
-      // A scheduled transaction occupies a place in the transaction queue and holds locks,
-      // preventing other transactions from progressing. Blocking below can deadlock!
-      CHECK(!tx->IsScheduled());
-
-      interpreter_ = ServerState::tlocal()->BorrowInterpreter();
-      owned_ = true;
-    }
-  }
-
-  ~BorrowedInterpreter() {
-    if (owned_)
-      ServerState::tlocal()->ReturnInterpreter(interpreter_);
-  }
-
-  // Give up ownership of the interpreter, it must be returned manually.
-  Interpreter* Release() && {
-    DCHECK(owned_);
-    owned_ = false;
-    return interpreter_;
-  }
-
-  operator Interpreter*() {
-    return interpreter_;
-  }
-
- private:
-  Interpreter* interpreter_ = nullptr;
-  bool owned_ = false;
-};
-
 string ConnectionLogContext(const facade::Connection* conn) {
   if (conn == nullptr) {
     return "(null-conn)";
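BorrowedInterpreter is deleted here but not gone: later hunks construct it with `&cntx->conn_state`, so it moved to a shared location with a ConnectionState-based constructor. Its core is a borrow-or-reuse RAII guard; a simplified sketch of that shape, with placeholder pool functions:

    #include <cassert>

    struct Interpreter {};

    // Placeholders for the thread-local interpreter pool.
    static Interpreter pool_slot;
    Interpreter* BorrowFromPool() { return &pool_slot; }
    void ReturnToPool(Interpreter*) {}

    class BorrowedGuard {
     public:
      explicit BorrowedGuard(Interpreter* preborrowed) {
        if (preborrowed) {
          interpreter_ = preborrowed;  // reuse MULTI's interpreter; we don't own it
        } else {
          interpreter_ = BorrowFromPool();
          owned_ = true;
        }
      }

      ~BorrowedGuard() {
        if (owned_)
          ReturnToPool(interpreter_);
      }

      // Hand ownership to the caller, e.g. to stash as a preborrowed interpreter.
      Interpreter* Release() && {
        assert(owned_);
        owned_ = false;
        return interpreter_;
      }

      operator Interpreter*() { return interpreter_; }

     private:
      Interpreter* interpreter_ = nullptr;
      bool owned_ = false;
    };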
@@ -931,7 +823,7 @@ void Service::Init(util::AcceptServer* acceptor, std::vector<facade::Listener*> listeners,
     auto* shard = EngineShard::tlocal();
     if (shard) {
       auto shard_id = shard->shard_id();
-      auto& db_slice = namespaces.GetDefaultNamespace().GetDbSlice(shard_id);
+      auto& db_slice = namespaces->GetDefaultNamespace().GetDbSlice(shard_id);
       db_slice.SetNotifyKeyspaceEvents(*res);
     }
   });

@@ -1005,7 +897,6 @@ void Service::Shutdown() {
   ChannelStore::Destroy();
 
   shard_set->PreShutdown();
-  namespaces.Clear();
   shard_set->Shutdown();
   Transaction::Shutdown();
 
@@ -1232,10 +1123,10 @@ std::optional<ErrorReply> Service::VerifyCommandState(const CommandId* cid, CmdArgList args,
 
 void Service::DispatchCommand(ArgSlice args, SinkReplyBuilder* builder,
                               facade::ConnectionContext* cntx) {
-  absl::Cleanup clear_last_error([builder]() { std::ignore = builder->ConsumeLastError(); });
   DCHECK(!args.empty());
   DCHECK_NE(0u, shard_set->size()) << "Init was not called";
 
+  absl::Cleanup clear_last_error([builder]() { builder->ConsumeLastError(); });
   ServerState& etl = *ServerState::tlocal();
 
   string cmd = absl::AsciiStrToUpper(args[0]);
@@ -1348,23 +1239,26 @@ class ReplyGuard {
     const bool is_script = bool(cntx->conn_state.script_info);
     const bool is_one_of =
         absl::flat_hash_set<std::string_view>({"REPLCONF", "DFLY"}).contains(cid_name);
-    bool is_mcache = builder->type() == SinkReplyBuilder::MC;
+    bool is_mcache = builder->GetProtocol() == Protocol::MEMCACHE;
     const bool is_no_reply_memcache =
         is_mcache && (static_cast<MCReplyBuilder*>(builder)->NoReply() || cid_name == "QUIT");
     const bool should_dcheck = !is_one_of && !is_script && !is_no_reply_memcache;
     if (should_dcheck) {
       builder_ = builder;
       builder_->ExpectReply();
+      replies_recorded_ = builder_->RepliesRecorded();
     }
   }
 
   ~ReplyGuard() {
     if (builder_) {
-      DCHECK(builder_->HasReplied());
+      DCHECK_GT(builder_->RepliesRecorded(), replies_recorded_)
+          << cid_name_ << " " << typeid(*builder_).name();
     }
   }
 
  private:
+  size_t replies_recorded_ = 0;
   std::string_view cid_name_;
   SinkReplyBuilder* builder_ = nullptr;
 };
@@ -1402,7 +1296,7 @@ bool Service::InvokeCmd(const CommandId* cid, CmdArgList tail_args, SinkReplyBuilder* builder,
       return true;
     }
     builder->SendError(std::move(*err));
-    std::ignore = builder->ConsumeLastError();
+    builder->ConsumeLastError();
     return true;  // return false only for internal error aborts
   }
 

@@ -1445,9 +1339,7 @@ bool Service::InvokeCmd(const CommandId* cid, CmdArgList tail_args, SinkReplyBuilder* builder,
     return false;
   }
 
-  std::string reason = builder->ConsumeLastError();
-
-  if (!reason.empty()) {
-    VLOG(2) << FailedCommandToString(cid->name(), tail_args, reason);
+  if (std::string reason = builder->ConsumeLastError(); !reason.empty()) {
+    LOG_EVERY_T(WARNING, 1) << FailedCommandToString(cid->name(), tail_args, reason);
   }
 

@@ -1493,7 +1385,7 @@ size_t Service::DispatchManyCommands(absl::Span<CmdArgList> args_list, SinkReplyBuilder* builder,
                                      facade::ConnectionContext* cntx) {
   ConnectionContext* dfly_cntx = static_cast<ConnectionContext*>(cntx);
   DCHECK(!dfly_cntx->conn_state.exec_info.IsRunning());
-  DCHECK_EQ(builder->type(), SinkReplyBuilder::REDIS);
+  DCHECK_EQ(builder->GetProtocol(), Protocol::REDIS);
 
   vector<StoredCmd> stored_cmds;
   intrusive_ptr<Transaction> dist_trans;
@@ -1689,13 +1581,12 @@ bool RequirePrivilegedAuth() {
   return !GetFlag(FLAGS_admin_nopass);
 }
 
-facade::ConnectionContext* Service::CreateContext(util::FiberSocketBase* peer,
-                                                  facade::Connection* owner) {
+facade::ConnectionContext* Service::CreateContext(facade::Connection* owner) {
   auto cred = user_registry_.GetCredentials("default");
-  ConnectionContext* res = new ConnectionContext{peer, owner, std::move(cred)};
-  res->ns = &namespaces.GetOrInsert("");
+  ConnectionContext* res = new ConnectionContext{owner, std::move(cred)};
+  res->ns = &namespaces->GetOrInsert("");
 
-  if (peer->IsUDS()) {
+  if (owner->socket()->IsUDS()) {
     res->req_auth = false;
     res->skip_acl_validation = true;
   } else if (owner->IsPrivileged() && RequirePrivilegedAuth()) {

@@ -1744,9 +1635,9 @@ absl::flat_hash_map<std::string, unsigned> Service::UknownCmdMap() const {
 
 void Service::Quit(CmdArgList args, Transaction* tx, SinkReplyBuilder* builder,
                    ConnectionContext* cntx) {
-  if (builder->type() == SinkReplyBuilder::REDIS)
+  if (builder->GetProtocol() == Protocol::REDIS)
     builder->SendOk();
-  using facade::SinkReplyBuilder;
 
   builder->CloseConnection();
 
   DeactivateMonitoring(static_cast<ConnectionContext*>(cntx));
@@ -1873,7 +1764,7 @@ void Service::Eval(CmdArgList args, Transaction* tx, SinkReplyBuilder* builder,
     return rb->SendNull();
   }
 
-  BorrowedInterpreter interpreter{tx, cntx};
+  BorrowedInterpreter interpreter{tx, &cntx->conn_state};
   auto res = server_family_.script_mgr()->Insert(body, interpreter);
   if (!res)
     return builder->SendError(res.error().Format(), facade::kScriptErrType);

@@ -1887,7 +1778,7 @@ void Service::EvalSha(CmdArgList args, Transaction* tx, SinkReplyBuilder* builder,
                       ConnectionContext* cntx) {
   string sha = absl::AsciiStrToLower(ArgS(args, 0));
 
-  BorrowedInterpreter interpreter{cntx->transaction, cntx};
+  BorrowedInterpreter interpreter{cntx->transaction, &cntx->conn_state};
   CallSHA(args, sha, interpreter, builder, cntx);
 }
 
@@ -2254,12 +2145,13 @@ void Service::Exec(CmdArgList args, Transaction* tx, SinkReplyBuilder* builder,
 
   cntx->last_command_debug.exec_body_len = exec_info.body.size();
 
-  // The transaction can contain scripts, determine their presence ahead to customize logic below.
-  ExecEvalState state = DetermineEvalPresense(exec_info.body);
+  // The transaction can contain script load and script execution; determine their presence ahead
+  // to customize logic below.
+  ExecScriptUse state = DetermineScriptPresense(exec_info.body);
 
-  // We borrow a single interpreter for all the EVALs inside. Returned by MultiCleanup
-  if (state != ExecEvalState::NONE) {
-    exec_info.preborrowed_interpreter = BorrowedInterpreter(tx, cntx).Release();
+  // We borrow a single interpreter for all the EVALs / SCRIPT LOADs inside. Returned by
+  // MultiCleanup.
+  if (state != ExecScriptUse::NONE) {
+    exec_info.preborrowed_interpreter = BorrowedInterpreter(tx, &cntx->conn_state).Release();
   }
 
   // Determine the multi mode accordingly: not only by flag, but based on presence of global commands.

@@ -2293,7 +2185,7 @@ void Service::Exec(CmdArgList args, Transaction* tx, SinkReplyBuilder* builder,
     ServerState::tlocal()->exec_freq_count[descr]++;
   }
 
-  if (absl::GetFlag(FLAGS_multi_exec_squash) && state == ExecEvalState::NONE &&
+  if (absl::GetFlag(FLAGS_multi_exec_squash) && state != ExecScriptUse::SCRIPT_RUN &&
       !cntx->conn_state.tracking_info_.IsTrackingOn()) {
     MultiCommandSquasher::Execute(absl::MakeSpan(exec_info.body), rb, cntx, this);
   } else {
@@ -2402,7 +2294,7 @@ void Service::Function(CmdArgList args, Transaction* tx, SinkReplyBuilder* builder,
 
 void Service::PubsubChannels(string_view pattern, SinkReplyBuilder* builder) {
   auto* rb = static_cast<RedisReplyBuilder*>(builder);
-  rb->SendStringArr(ServerState::tlocal()->channel_store()->ListChannels(pattern));
+  rb->SendBulkStrArr(ServerState::tlocal()->channel_store()->ListChannels(pattern));
 }
 
 void Service::PubsubPatterns(SinkReplyBuilder* builder) {

@@ -2555,7 +2447,7 @@ void Service::Command(CmdArgList args, Transaction* tx, SinkReplyBuilder* builder,
 VarzValue::Map Service::GetVarzStats() {
   VarzValue::Map res;
 
-  Metrics m = server_family_.GetMetrics(&namespaces.GetDefaultNamespace());
+  Metrics m = server_family_.GetMetrics(&namespaces->GetDefaultNamespace());
   DbStats db_stats;
   for (const auto& s : m.db_stats) {
     db_stats += s;
@@ -62,8 +62,7 @@ class Service : public facade::ServiceInterface {
   void DispatchMC(const MemcacheParser::Command& cmd, std::string_view value,
                   facade::MCReplyBuilder* builder, facade::ConnectionContext* cntx) final;
 
-  facade::ConnectionContext* CreateContext(util::FiberSocketBase* peer,
-                                           facade::Connection* owner) final;
+  facade::ConnectionContext* CreateContext(facade::Connection* owner) final;
 
   const CommandId* FindCmd(std::string_view) const;
 
@@ -121,7 +121,7 @@ bool MultiCommandSquasher::ExecuteStandalone(facade::RedisReplyBuilder* rb, StoredCmd* cmd) {
   if (verify_commands_) {
     if (auto err = service_->VerifyCommandState(cmd->Cid(), args, *cntx_); err) {
       rb->SendError(std::move(*err));
-      std::ignore = rb->ConsumeLastError();
+      rb->ConsumeLastError();
       return !error_abort_;
     }
   }
 
@@ -1147,6 +1147,29 @@ TEST_F(MultiEvalTest, MultiAndEval) {
   Run({"eval", "return 'OK';", "0"});
   auto resp = Run({"exec"});
   EXPECT_EQ(resp, "OK");
+
+  // We had a bug running script load inside multi
+  Run({"multi"});
+  Run({"script", "load", "return '5'"});
+  Run({"exec"});
+
+  Run({"multi"});
+  Run({"script", "load", "return '5'"});
+  Run({"get", "x"});
+  Run({"exec"});
+
+  Run({"multi"});
+  Run({"script", "load", "return '5'"});
+  Run({"mset", "x1", "y1", "x2", "y2"});
+  Run({"exec"});
+
+  Run({"multi"});
+  Run({"script", "load", "return '5'"});
+  Run({"eval", "return redis.call('set', 'x', 'y')", "1", "x"});
+  Run({"get", "x"});
+  Run({"exec"});
+
+  Run({"get", "x"});
 }
 
 TEST_F(MultiTest, MultiTypes) {
@@ -1,3 +1,7 @@
+// Copyright 2024, DragonflyDB authors.  All rights reserved.
+// See LICENSE for licensing terms.
+//
+
 #include "server/namespaces.h"
 
 #include "base/flags.h"

@@ -45,19 +49,12 @@ BlockingController* Namespace::GetBlockingController(ShardId sid) {
   return shard_blocking_controller_[sid].get();
 }
 
-Namespaces namespaces;
-
-Namespaces::~Namespaces() {
-  Clear();
-}
-
-void Namespaces::Init() {
-  DCHECK(default_namespace_ == nullptr);
+Namespaces::Namespaces() {
   default_namespace_ = &GetOrInsert("");
 }
 
-bool Namespaces::IsInitialized() const {
-  return default_namespace_ != nullptr;
+Namespaces::~Namespaces() {
+  Clear();
 }
 
 void Namespaces::Clear() {
@@ -49,11 +49,9 @@ class Namespace {
 // mutual dependencies.
 class Namespaces {
  public:
-  Namespaces() = default;
+  Namespaces();
   ~Namespaces();
 
-  void Init();
-  bool IsInitialized() const;
   void Clear() ABSL_LOCKS_EXCLUDED(mu_);  // Thread unsafe, use in tear-down or tests
 
   Namespace& GetDefaultNamespace() const;  // No locks

@@ -65,6 +63,4 @@ class Namespaces {
   Namespace* default_namespace_ = nullptr;
 };
 
-extern Namespaces namespaces;
-
 } // namespace dfly
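The new Namespaces constructor simply pre-creates the default namespace via GetOrInsert(""). GetOrInsert itself is, in essence, a lock-guarded get-or-create map; a generic sketch of that shape (assumed, not the actual implementation):

    #include <map>
    #include <mutex>
    #include <string>

    struct Namespace {};

    class NamespaceMap {
     public:
      Namespace& GetOrInsert(const std::string& name) {
        std::lock_guard<std::mutex> lk(mu_);
        return map_[name];  // default-constructs the entry on first use
      }

     private:
      std::mutex mu_;
      // std::map nodes have stable addresses, so returned references stay valid.
      std::map<std::string, Namespace> map_;
    };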
@@ -384,7 +384,7 @@ bool ProtocolClient::CheckRespFirstTypes(initializer_list<RespExpr::Type> types) {
 }
 
 error_code ProtocolClient::SendCommand(string_view command) {
-  string formatted_command = RedisReplyBuilder2Base::SerializeCommand(command);
+  string formatted_command = RedisReplyBuilderBase::SerializeCommand(command);
   auto ec = sock_->Write(io::Buffer(formatted_command));
   if (!ec)
     TouchIoTime();
@@ -32,6 +32,7 @@ extern "C" {
 #include "base/logging.h"
 #include "core/bloom.h"
 #include "core/json/json_object.h"
+#include "core/qlist.h"
 #include "core/sorted_map.h"
 #include "core/string_map.h"
 #include "core/string_set.h"

@@ -57,7 +58,7 @@ extern "C" {
 ABSL_DECLARE_FLAG(int32_t, list_max_listpack_size);
 ABSL_DECLARE_FLAG(int32_t, list_compress_depth);
 ABSL_DECLARE_FLAG(uint32_t, dbnum);
-
+ABSL_DECLARE_FLAG(bool, list_experimental_v2);
 namespace dfly {
 
 using namespace std;

@@ -709,20 +710,34 @@ void RdbLoaderBase::OpaqueObjLoader::CreateHMap(const LoadTrace* ltrace) {
 }
 
 void RdbLoaderBase::OpaqueObjLoader::CreateList(const LoadTrace* ltrace) {
-  quicklist* ql;
+  quicklist* ql = nullptr;
+  QList* qlv2 = nullptr;
   if (config_.append) {
-    if (!EnsureObjEncoding(OBJ_LIST, OBJ_ENCODING_QUICKLIST)) {
+    if (pv_->ObjType() != OBJ_LIST) {
+      ec_ = RdbError(errc::invalid_rdb_type);
       return;
     }
 
-    ql = static_cast<quicklist*>(pv_->RObjPtr());
+    if (pv_->Encoding() == OBJ_ENCODING_QUICKLIST) {
+      ql = static_cast<quicklist*>(pv_->RObjPtr());
+    } else {
+      DCHECK_EQ(pv_->Encoding(), kEncodingQL2);
+      qlv2 = static_cast<QList*>(pv_->RObjPtr());
+    }
   } else {
-    ql = quicklistNew(GetFlag(FLAGS_list_max_listpack_size), GetFlag(FLAGS_list_compress_depth));
+    if (absl::GetFlag(FLAGS_list_experimental_v2)) {
+      qlv2 = CompactObj::AllocateMR<QList>(GetFlag(FLAGS_list_max_listpack_size),
+                                           GetFlag(FLAGS_list_compress_depth));
+    } else {
+      ql = quicklistNew(GetFlag(FLAGS_list_max_listpack_size), GetFlag(FLAGS_list_compress_depth));
+    }
   }
 
   auto cleanup = absl::Cleanup([&] {
     if (!config_.append) {
-      quicklistRelease(ql);
+      if (ql)
+        quicklistRelease(ql);
+      else
+        CompactObj::DeleteMR<QList>(qlv2);
    }
  });

@@ -737,7 +752,11 @@ void RdbLoaderBase::OpaqueObjLoader::CreateList(const LoadTrace* ltrace) {
     if (container == QUICKLIST_NODE_CONTAINER_PLAIN) {
       lp = (uint8_t*)zmalloc(sv.size());
       ::memcpy(lp, (uint8_t*)sv.data(), sv.size());
-      quicklistAppendPlainNode(ql, lp, sv.size());
+      if (ql)
+        quicklistAppendPlainNode(ql, lp, sv.size());
+      else
+        qlv2->AppendPlain(lp, sv.size());
+
       return true;
     }
 

@@ -774,13 +793,16 @@ void RdbLoaderBase::OpaqueObjLoader::CreateList(const LoadTrace* ltrace) {
       lp = lpShrinkToFit(lp);
     }
 
-    quicklistAppendListpack(ql, lp);
+    if (ql)
+      quicklistAppendListpack(ql, lp);
+    else
+      qlv2->AppendListpack(lp);
     return true;
   });
 
   if (ec_)
     return;
-  if (quicklistCount(ql) == 0) {
+  if ((ql && quicklistCount(ql) == 0) || (qlv2 && qlv2->Size() == 0)) {
     ec_ = RdbError(errc::empty_key);
     return;
   }

@@ -788,7 +810,10 @@ void RdbLoaderBase::OpaqueObjLoader::CreateList(const LoadTrace* ltrace) {
   std::move(cleanup).Cancel();
 
   if (!config_.append) {
-    pv_->InitRobj(OBJ_LIST, OBJ_ENCODING_QUICKLIST, ql);
+    if (ql)
+      pv_->InitRobj(OBJ_LIST, OBJ_ENCODING_QUICKLIST, ql);
+    else
+      pv_->InitRobj(OBJ_LIST, kEncodingQL2, qlv2);
   }
 }

@@ -2258,7 +2283,7 @@ error_code RdbLoader::Load(io::Source* src) {
 
       // Active database if not existed before.
       shard_set->Add(
-          i, [dbid] { namespaces.GetDefaultNamespace().GetCurrentDbSlice().ActivateDb(dbid); });
+          i, [dbid] { namespaces->GetDefaultNamespace().GetCurrentDbSlice().ActivateDb(dbid); });
     }
 
     cur_db_index_ = dbid;

@@ -2656,7 +2681,7 @@ std::error_code RdbLoaderBase::FromOpaque(const OpaqueObj& opaque, LoadConfig config,
 
 void RdbLoader::LoadItemsBuffer(DbIndex db_ind, const ItemsBuf& ib) {
   EngineShard* es = EngineShard::tlocal();
-  DbContext db_cntx{&namespaces.GetDefaultNamespace(), db_ind, GetCurrentTimeMs()};
+  DbContext db_cntx{&namespaces->GetDefaultNamespace(), db_ind, GetCurrentTimeMs()};
   DbSlice& db_slice = db_cntx.GetDbSlice(es->shard_id());
 
   auto error_msg = [](const auto* item, auto db_ind) {

@@ -2860,7 +2885,7 @@ void RdbLoader::LoadSearchIndexDefFromAux(string&& def) {
   cntx.is_replicating = true;
   cntx.journal_emulated = true;
   cntx.skip_acl_validation = true;
-  cntx.ns = &namespaces.GetDefaultNamespace();
+  cntx.ns = &namespaces->GetDefaultNamespace();
 
   uint32_t consumed = 0;
   facade::RespVec resp_vec;

@@ -2897,7 +2922,7 @@ void RdbLoader::PerformPostLoad(Service* service) {
   // Rebuild all search indices as only their definitions are extracted from the snapshot
   shard_set->AwaitRunningOnShardQueue([](EngineShard* es) {
     es->search_indices()->RebuildAllIndices(
-        OpArgs{es, nullptr, DbContext{&namespaces.GetDefaultNamespace(), 0, GetCurrentTimeMs()}});
+        OpArgs{es, nullptr, DbContext{&namespaces->GetDefaultNamespace(), 0, GetCurrentTimeMs()}});
   });
 }
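CreateList relies on the cleanup-then-cancel idiom visible above: register a deleter for the partially built list so every error path stays a plain `return`, then cancel it once ownership is transferred via InitRobj. A minimal sketch of the idiom:

    #include <utility>

    #include "absl/cleanup/cleanup.h"

    struct List {};

    List* BuildOrNull(bool fail) {
      List* l = new List();
      // Fires on every exit path until cancelled, so early returns cannot leak.
      auto cleanup = absl::Cleanup([&] { delete l; });

      if (fail)
        return nullptr;  // cleanup deletes l

      std::move(cleanup).Cancel();  // ownership passes to the caller
      return l;
    }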
@@ -29,6 +29,7 @@ extern "C" {
 #include "base/logging.h"
 #include "core/bloom.h"
 #include "core/json/json_object.h"
+#include "core/qlist.h"
 #include "core/size_tracking_channel.h"
 #include "core/sorted_map.h"
 #include "core/string_map.h"

@@ -168,12 +169,10 @@ uint8_t RdbObjectType(const PrimeValue& pv) {
     case OBJ_STRING:
       return RDB_TYPE_STRING;
     case OBJ_LIST:
-      if (compact_enc == OBJ_ENCODING_QUICKLIST) {
-        if (absl::GetFlag(FLAGS_list_rdb_encode_v2))
-          return RDB_TYPE_LIST_QUICKLIST_2;
-        return RDB_TYPE_LIST_QUICKLIST;
+      if (compact_enc == OBJ_ENCODING_QUICKLIST || compact_enc == kEncodingQL2) {
+        return absl::GetFlag(FLAGS_list_rdb_encode_v2) ? RDB_TYPE_LIST_QUICKLIST_2
+                                                       : RDB_TYPE_LIST_QUICKLIST;
       }
 
       break;
     case OBJ_SET:
       if (compact_enc == kEncodingIntSet)

@@ -436,12 +435,21 @@ error_code RdbSerializer::SaveObject(const PrimeValue& pv) {
 
 error_code RdbSerializer::SaveListObject(const PrimeValue& pv) {
   /* Save a list value */
-  DCHECK_EQ(OBJ_ENCODING_QUICKLIST, pv.Encoding());
-  const quicklist* ql = reinterpret_cast<const quicklist*>(pv.RObjPtr());
-  quicklistNode* node = ql->head;
-  DVLOG(2) << "Saving list of length " << ql->len;
+  size_t len = 0;
+  const quicklistNode* node = nullptr;
 
-  RETURN_ON_ERR(SaveLen(ql->len));
+  if (pv.Encoding() == OBJ_ENCODING_QUICKLIST) {
+    const quicklist* ql = reinterpret_cast<const quicklist*>(pv.RObjPtr());
+    node = ql->head;
+    DVLOG(2) << "Saving list of length " << ql->len;
+    len = ql->len;
+  } else {
+    DCHECK_EQ(pv.Encoding(), kEncodingQL2);
+    QList* ql = reinterpret_cast<QList*>(pv.RObjPtr());
+    node = ql->Head();
+    len = ql->node_count();
+  }
+  RETURN_ON_ERR(SaveLen(len));
 
   while (node) {
     DVLOG(3) << "QL node (encoding/container/sz): " << node->encoding << "/" << node->container

@@ -759,7 +767,7 @@ error_code RdbSerializer::SaveListPackAsZiplist(uint8_t* lp) {
   return ec;
 }
 
-error_code RdbSerializer::SavePlainNodeAsZiplist(quicklistNode* node) {
+error_code RdbSerializer::SavePlainNodeAsZiplist(const quicklistNode* node) {
   uint8_t* zl = ziplistNew();
   zl = ziplistPush(zl, node->entry, node->sz, ZIPLIST_TAIL);
 

@@ -1319,7 +1327,7 @@ void RdbSaver::Impl::FinalizeSnapshotWriting() {
 
 void RdbSaver::Impl::StartSnapshotting(bool stream_journal, Context* cntx, EngineShard* shard) {
   auto& s = GetSnapshot(shard);
-  auto& db_slice = namespaces.GetDefaultNamespace().GetDbSlice(shard->shard_id());
+  auto& db_slice = namespaces->GetDefaultNamespace().GetDbSlice(shard->shard_id());
   auto on_snapshot_finish = std::bind(&RdbSaver::Impl::FinalizeSnapshotWriting, this);
   auto push_cb = std::bind(&RdbSaver::Impl::PushSnapshotData, this, cntx, std::placeholders::_1);
 

@@ -1333,7 +1341,7 @@ void RdbSaver::Impl::StartSnapshotting(bool stream_journal, Context* cntx, EngineShard* shard) {
 
 void RdbSaver::Impl::StartIncrementalSnapshotting(Context* cntx, EngineShard* shard,
                                                   LSN start_lsn) {
-  auto& db_slice = namespaces.GetDefaultNamespace().GetDbSlice(shard->shard_id());
+  auto& db_slice = namespaces->GetDefaultNamespace().GetDbSlice(shard->shard_id());
   auto& s = GetSnapshot(shard);
   auto on_finalize_cb = std::bind(&RdbSaver::Impl::FinalizeSnapshotWriting, this);
   auto push_cb = std::bind(&RdbSaver::Impl::PushSnapshotData, this, cntx, std::placeholders::_1);
@@ -255,7 +255,7 @@ class RdbSerializer : public SerializerBase {
   std::error_code SaveListPackAsZiplist(uint8_t* lp);
   std::error_code SaveStreamPEL(rax* pel, bool nacks);
   std::error_code SaveStreamConsumers(streamCG* cg);
-  std::error_code SavePlainNodeAsZiplist(quicklistNode* node);
+  std::error_code SavePlainNodeAsZiplist(const quicklistNode* node);
 
   // Might preempt
   void FlushIfNeeded(FlushState flush_state);
@@ -593,11 +593,11 @@ error_code Replica::InitiateDflySync() {
 
 error_code Replica::ConsumeRedisStream() {
   base::IoBuf io_buf(16_KB);
-  ConnectionContext conn_context{static_cast<io::Sink*>(nullptr), nullptr, {}};
+  ConnectionContext conn_context{nullptr, {}};
   conn_context.is_replicating = true;
   conn_context.journal_emulated = true;
   conn_context.skip_acl_validation = true;
-  conn_context.ns = &namespaces.GetDefaultNamespace();
+  conn_context.ns = &namespaces->GetDefaultNamespace();
 
   // we never reply back on the commands.
   facade::CapturingReplyBuilder null_builder{facade::ReplyMode::NONE};
@@ -67,7 +67,8 @@ ScriptMgr::ScriptKey::ScriptKey(string_view sha) : array{} {
   memcpy(data(), sha.data(), size());
 }
 
-void ScriptMgr::Run(CmdArgList args, Transaction* tx, SinkReplyBuilder* builder) {
+void ScriptMgr::Run(CmdArgList args, Transaction* tx, SinkReplyBuilder* builder,
+                    ConnectionContext* cntx) {
   string subcmd = absl::AsciiStrToUpper(ArgS(args, 0));
 
   if (subcmd == "HELP") {

@@ -110,7 +111,7 @@ void ScriptMgr::Run(CmdArgList args, Transaction* tx, SinkReplyBuilder* builder,
     return LatencyCmd(tx, builder);
 
   if (subcmd == "LOAD" && args.size() == 2)
-    return LoadCmd(args, tx, builder);
+    return LoadCmd(args, tx, builder, cntx);
 
   if (subcmd == "FLAGS" && args.size() > 2)
     return ConfigCmd(args, tx, builder);

@@ -144,7 +145,8 @@ void ScriptMgr::FlushCmd(CmdArgList args, Transaction* tx, SinkReplyBuilder* builder) {
   return builder->SendOk();
 }
 
-void ScriptMgr::LoadCmd(CmdArgList args, Transaction* tx, SinkReplyBuilder* builder) {
+void ScriptMgr::LoadCmd(CmdArgList args, Transaction* tx, SinkReplyBuilder* builder,
+                        ConnectionContext* cntx) {
   string_view body = ArgS(args, 1);
   auto rb = static_cast<RedisReplyBuilder*>(builder);
   if (body.empty()) {

@@ -153,9 +155,7 @@ void ScriptMgr::LoadCmd(CmdArgList args, Transaction* tx, SinkReplyBuilder* builder,
     return rb->SendBulkString(sha);
   }
 
-  ServerState* ss = ServerState::tlocal();
-  auto interpreter = ss->BorrowInterpreter();
-  absl::Cleanup clean = [ss, interpreter]() { ss->ReturnInterpreter(interpreter); };
+  BorrowedInterpreter interpreter{tx, &cntx->conn_state};
 
   auto res = Insert(body, interpreter);
   if (!res)
@@ -48,7 +48,7 @@ class ScriptMgr {
 
   ScriptMgr();
 
-  void Run(CmdArgList args, Transaction* tx, SinkReplyBuilder* builder);
+  void Run(CmdArgList args, Transaction* tx, SinkReplyBuilder* builder, ConnectionContext* cntx);
 
   // Insert script and return sha. Get possible error from compilation or parsing script flags.
   io::Result<std::string, GenericError> Insert(std::string_view body, Interpreter* interpreter);

@@ -69,7 +69,8 @@ class ScriptMgr {
  private:
   void ExistsCmd(CmdArgList args, Transaction* tx, SinkReplyBuilder* builder) const;
   void FlushCmd(CmdArgList args, Transaction* tx, SinkReplyBuilder* builder);
-  void LoadCmd(CmdArgList args, Transaction* tx, SinkReplyBuilder* builder);
+  void LoadCmd(CmdArgList args, Transaction* tx, SinkReplyBuilder* builder,
+               ConnectionContext* cntx);
   void ConfigCmd(CmdArgList args, Transaction* tx, SinkReplyBuilder* builder);
   void ListCmd(Transaction* tx, SinkReplyBuilder* builder) const;
   void LatencyCmd(Transaction* tx, SinkReplyBuilder* builder) const;
@@ -38,43 +38,44 @@ string_view SdsToSafeSv(sds str) {
   return str != nullptr ? string_view{str, sdslen(str)} : ""sv;
 }
 
-search::SortableValue FieldToSortableValue(search::SchemaField::FieldType type, string_view value) {
+using FieldValue = std::optional<search::SortableValue>;
+
+FieldValue ToSortableValue(search::SchemaField::FieldType type, string_view value) {
+  if (value.empty()) {
+    return std::nullopt;
+  }
+
   if (type == search::SchemaField::NUMERIC) {
-    double value_as_double = 0;
-    if (!absl::SimpleAtod(value, &value_as_double)) {  // temporary convert to double
+    auto value_as_double = search::ParseNumericField(value);
+    if (!value_as_double) {  // temporary convert to double
       LOG(DFATAL) << "Failed to convert " << value << " to double";
+      return std::nullopt;
     }
-    return value_as_double;
+    return value_as_double.value();
   }
   if (type == search::SchemaField::VECTOR) {
-    auto [ptr, size] = search::BytesToFtVector(value);
+    auto opt_vector = search::BytesToFtVectorSafe(value);
+    if (!opt_vector) {
+      LOG(DFATAL) << "Failed to convert " << value << " to vector";
+      return std::nullopt;
+    }
+    auto& [ptr, size] = opt_vector.value();
     return absl::StrCat("[", absl::StrJoin(absl::Span<const float>{ptr.get(), size}, ","), "]");
   }
   return string{value};
 }
 
-search::SortableValue JsonToSortableValue(const search::SchemaField::FieldType type,
-                                          const JsonType& json) {
-  if (type == search::SchemaField::NUMERIC) {
-    return json.as_double();
-  }
-  return json.to_string();
-}
-
-search::SortableValue ExtractSortableValue(const search::Schema& schema, string_view key,
-                                           string_view value) {
+FieldValue ExtractSortableValue(const search::Schema& schema, string_view key, string_view value) {
   auto it = schema.fields.find(key);
   if (it == schema.fields.end())
-    return FieldToSortableValue(search::SchemaField::TEXT, value);
-  return FieldToSortableValue(it->second.type, value);
+    return ToSortableValue(search::SchemaField::TEXT, value);
+  return ToSortableValue(it->second.type, value);
 }
 
-search::SortableValue ExtractSortableValueFromJson(const search::Schema& schema, string_view key,
-                                                   const JsonType& json) {
-  auto it = schema.fields.find(key);
-  if (it == schema.fields.end())
-    return JsonToSortableValue(search::SchemaField::TEXT, json);
-  return JsonToSortableValue(it->second.type, json);
+FieldValue ExtractSortableValueFromJson(const search::Schema& schema, string_view key,
+                                        const JsonType& json) {
+  auto json_as_string = json.to_string();
+  return ExtractSortableValue(schema, key, json_as_string);
 }
 
 }  // namespace
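The reworked helpers return std::optional so fields that fail to parse are skipped rather than serialized as garbage. A simplified, self-contained sketch of this parse-or-skip pattern (the field model here is invented for illustration):

    #include <cstdlib>
    #include <map>
    #include <optional>
    #include <string>
    #include <string_view>
    #include <utility>
    #include <variant>

    using SortableValue = std::variant<double, std::string>;

    // Returns nullopt when the raw text cannot be interpreted for this field type.
    std::optional<SortableValue> ToSortable(bool numeric, std::string_view raw) {
      if (raw.empty())
        return std::nullopt;
      if (numeric) {
        std::string tmp{raw};
        char* end = nullptr;
        double d = std::strtod(tmp.c_str(), &end);
        if (end != tmp.c_str() + tmp.size())
          return std::nullopt;  // skip instead of storing a bogus value
        return SortableValue{d};
      }
      return SortableValue{std::string{raw}};
    }

    // Only fields that parsed cleanly end up in the serialized document.
    std::map<std::string, SortableValue> SerializeDoc(
        const std::map<std::string, std::pair<bool, std::string>>& fields) {
      std::map<std::string, SortableValue> out;
      for (const auto& [name, typed_raw] : fields) {
        if (auto v = ToSortable(typed_raw.first, typed_raw.second))
          out[name] = std::move(*v);
      }
      return out;
    }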
@ -83,7 +84,11 @@ SearchDocData BaseAccessor::Serialize(
|
|||
const search::Schema& schema, absl::Span<const SearchField<std::string_view>> fields) const {
|
||||
SearchDocData out{};
|
||||
for (const auto& [fident, fname] : fields) {
|
||||
out[fname] = ExtractSortableValue(schema, fident, absl::StrJoin(GetStrings(fident), ","));
|
||||
auto field_value =
|
||||
ExtractSortableValue(schema, fident, absl::StrJoin(GetStrings(fident).value(), ","));
|
||||
if (field_value) {
|
||||
out[fname] = std::move(field_value).value();
|
||||
}
|
||||
}
|
||||
return out;
|
||||
}
|
||||
|
@ -92,14 +97,39 @@ SearchDocData BaseAccessor::SerializeDocument(const search::Schema& schema) cons
|
|||
return Serialize(schema);
|
||||
}
|
||||
|
||||
BaseAccessor::StringList ListPackAccessor::GetStrings(string_view active_field) const {
|
||||
auto strsv = container_utils::LpFind(lp_, active_field, intbuf_[0].data());
|
||||
return strsv.has_value() ? StringList{*strsv} : StringList{};
|
||||
std::optional<BaseAccessor::VectorInfo> BaseAccessor::GetVector(
|
||||
std::string_view active_field) const {
|
||||
auto strings_list = GetStrings(active_field);
|
||||
if (strings_list) {
|
||||
return !strings_list->empty() ? search::BytesToFtVectorSafe(strings_list->front())
|
||||
: VectorInfo{};
|
||||
}
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
BaseAccessor::VectorInfo ListPackAccessor::GetVector(string_view active_field) const {
|
||||
auto strlist = GetStrings(active_field);
|
||||
return strlist.empty() ? VectorInfo{} : search::BytesToFtVector(strlist.front());
|
||||
std::optional<BaseAccessor::NumsList> BaseAccessor::GetNumbers(
|
||||
std::string_view active_field) const {
|
||||
auto strings_list = GetStrings(active_field);
|
||||
if (!strings_list) {
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
NumsList nums_list;
|
||||
nums_list.reserve(strings_list->size());
|
||||
for (auto str : strings_list.value()) {
|
||||
auto num = search::ParseNumericField(str);
|
||||
if (!num) {
|
||||
return std::nullopt;
|
||||
}
|
||||
nums_list.push_back(num.value());
|
||||
}
|
||||
return nums_list;
|
||||
}
|
||||
|
||||
std::optional<BaseAccessor::StringList> ListPackAccessor::GetStrings(
|
||||
string_view active_field) const {
|
||||
auto strsv = container_utils::LpFind(lp_, active_field, intbuf_[0].data());
|
||||
return strsv.has_value() ? StringList{*strsv} : StringList{};
|
||||
}
|
||||
|
||||
SearchDocData ListPackAccessor::Serialize(const search::Schema& schema) const {
|
||||
|
@@ -114,27 +144,29 @@ SearchDocData ListPackAccessor::Serialize(const search::Schema& schema) const {
    string_view v = container_utils::LpGetView(fptr, intbuf_[1].data());
    fptr = lpNext(lp_, fptr);

    out[k] = ExtractSortableValue(schema, k, v);
    auto field_value = ExtractSortableValue(schema, k, v);
    if (field_value) {
      out[k] = std::move(field_value).value();
    }
  }

  return out;
}

BaseAccessor::StringList StringMapAccessor::GetStrings(string_view active_field) const {
std::optional<BaseAccessor::StringList> StringMapAccessor::GetStrings(
    string_view active_field) const {
  auto it = hset_->Find(active_field);
  return it != hset_->end() ? StringList{SdsToSafeSv(it->second)} : StringList{};
}

BaseAccessor::VectorInfo StringMapAccessor::GetVector(string_view active_field) const {
  auto strlist = GetStrings(active_field);
  return strlist.empty() ? VectorInfo{} : search::BytesToFtVector(strlist.front());
}

SearchDocData StringMapAccessor::Serialize(const search::Schema& schema) const {
  SearchDocData out{};
  for (const auto& [kptr, vptr] : *hset_)
    out[SdsToSafeSv(kptr)] = ExtractSortableValue(schema, SdsToSafeSv(kptr), SdsToSafeSv(vptr));

  for (const auto& [kptr, vptr] : *hset_) {
    auto field_value = ExtractSortableValue(schema, SdsToSafeSv(kptr), SdsToSafeSv(vptr));
    if (field_value) {
      out[SdsToSafeSv(kptr)] = std::move(field_value).value();
    }
  }
  return out;
}

@@ -159,27 +191,54 @@ struct JsonAccessor::JsonPathContainer {
  variant<json::Path, jsoncons::jsonpath::jsonpath_expression<JsonType>> val;
};

BaseAccessor::StringList JsonAccessor::GetStrings(string_view active_field) const {
std::optional<BaseAccessor::StringList> JsonAccessor::GetStrings(string_view active_field) const {
  auto* path = GetPath(active_field);
  if (!path)
    return {};
    return search::EmptyAccessResult<StringList>();

  auto path_res = path->Evaluate(json_);
  if (path_res.empty())
    return {};
    return search::EmptyAccessResult<StringList>();

  if (path_res.size() == 1 && !path_res[0].is_array()) {
    if (!path_res[0].is_string())
      return std::nullopt;

  if (path_res.size() == 1) {
    buf_ = path_res[0].as_string();
    return {buf_};
    return StringList{buf_};
  }

  buf_.clear();

  // First, grow buffer and compute string sizes
  vector<size_t> sizes;
  for (const auto& element : path_res) {

  auto add_json_to_buf = [&](const JsonType& json) {
    size_t start = buf_.size();
    buf_ += element.as_string();
    buf_ += json.as_string();
    sizes.push_back(buf_.size() - start);
  };

  if (!path_res[0].is_array()) {
    sizes.reserve(path_res.size());
    for (const auto& element : path_res) {
      if (!element.is_string())
        return std::nullopt;

      add_json_to_buf(element);
    }
  } else {
    if (path_res.size() > 1) {
      return std::nullopt;
    }

    sizes.reserve(path_res[0].size());
    for (const auto& element : path_res[0].array_range()) {
      if (!element.is_string())
        return std::nullopt;

      add_json_to_buf(element);
    }
  }

  // Reposition start pointers to the most recent allocation of buf

@@ -194,23 +253,62 @@ BaseAccessor::StringList JsonAccessor::GetStrings(string_view active_field) const
  return out;
}

BaseAccessor::VectorInfo JsonAccessor::GetVector(string_view active_field) const {
std::optional<BaseAccessor::VectorInfo> JsonAccessor::GetVector(string_view active_field) const {
  auto* path = GetPath(active_field);
  if (!path)
    return {};
    return VectorInfo{};

  auto res = path->Evaluate(json_);
  if (res.empty())
    return {nullptr, 0};
    return VectorInfo{};

  if (!res[0].is_array())
    return std::nullopt;

  size_t size = res[0].size();
  auto ptr = make_unique<float[]>(size);

  size_t i = 0;
  for (const auto& v : res[0].array_range())
  for (const auto& v : res[0].array_range()) {
    if (!v.is_number()) {
      return std::nullopt;
    }
    ptr[i++] = v.as<float>();
  }

  return {std::move(ptr), size};
  return BaseAccessor::VectorInfo{std::move(ptr), size};
}

std::optional<BaseAccessor::NumsList> JsonAccessor::GetNumbers(string_view active_field) const {
  auto* path = GetPath(active_field);
  if (!path)
    return search::EmptyAccessResult<NumsList>();

  auto path_res = path->Evaluate(json_);
  if (path_res.empty())
    return search::EmptyAccessResult<NumsList>();

  NumsList nums_list;
  if (!path_res[0].is_array()) {
    nums_list.reserve(path_res.size());
    for (const auto& element : path_res) {
      if (!element.is_number())
        return std::nullopt;
      nums_list.push_back(element.as<double>());
    }
  } else {
    if (path_res.size() > 1) {
      return std::nullopt;
    }

    nums_list.reserve(path_res[0].size());
    for (const auto& element : path_res[0].array_range()) {
      if (!element.is_number())
        return std::nullopt;
      nums_list.push_back(element.as<double>());
    }
  }
  return nums_list;
}

JsonAccessor::JsonPathContainer* JsonAccessor::GetPath(std::string_view field) const {

@@ -259,8 +357,12 @@ SearchDocData JsonAccessor::Serialize(
  SearchDocData out{};
  for (const auto& [ident, name] : fields) {
    if (auto* path = GetPath(ident); path) {
      if (auto res = path->Evaluate(json_); !res.empty())
        out[name] = ExtractSortableValueFromJson(schema, ident, res[0]);
      if (auto res = path->Evaluate(json_); !res.empty()) {
        auto field_value = ExtractSortableValueFromJson(schema, ident, res[0]);
        if (field_value) {
          out[name] = std::move(field_value).value();
        }
      }
    }
  }
  return out;

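The accessor getters above now return std::optional, where an empty optional signals a type mismatch between the stored value and the indexed field, while an empty list still means "field absent". A minimal sketch of consuming that contract; the accessor instance and the indexing step are hypothetical, for illustration only:

// Returns true if the document was indexable for this numeric field.
bool IndexPriceField(const BaseAccessor& accessor) {
  std::optional<BaseAccessor::NumsList> nums = accessor.GetNumbers("price");
  if (!nums)
    return false;  // type mismatch: skip this document instead of failing hard
  for (double value : *nums) {
    // feed `value` into the numeric index here (omitted)
    (void)value;
  }
  return true;
}
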
@@ -12,6 +12,7 @@

#include "core/json/json_object.h"
#include "core/search/search.h"
#include "core/search/vector_utils.h"
#include "server/common.h"
#include "server/search/doc_index.h"
#include "server/table.h"

@@ -37,6 +38,10 @@ struct BaseAccessor : public search::DocumentAccessor {
     indexed field
  */
  virtual SearchDocData SerializeDocument(const search::Schema& schema) const;

  // Default implementation uses GetStrings
  virtual std::optional<VectorInfo> GetVector(std::string_view active_field) const;
  virtual std::optional<NumsList> GetNumbers(std::string_view active_field) const;
};

// Accessor for hashes stored with listpack

@@ -46,8 +51,7 @@ struct ListPackAccessor : public BaseAccessor {
  explicit ListPackAccessor(LpPtr ptr) : lp_{ptr} {
  }

  StringList GetStrings(std::string_view field) const override;
  VectorInfo GetVector(std::string_view field) const override;
  std::optional<StringList> GetStrings(std::string_view field) const override;
  SearchDocData Serialize(const search::Schema& schema) const override;

 private:

@@ -60,8 +64,7 @@ struct StringMapAccessor : public BaseAccessor {
  explicit StringMapAccessor(StringMap* hset) : hset_{hset} {
  }

  StringList GetStrings(std::string_view field) const override;
  VectorInfo GetVector(std::string_view field) const override;
  std::optional<StringList> GetStrings(std::string_view field) const override;
  SearchDocData Serialize(const search::Schema& schema) const override;

 private:

@@ -75,8 +78,9 @@ struct JsonAccessor : public BaseAccessor {
  explicit JsonAccessor(const JsonType* json) : json_{*json} {
  }

  StringList GetStrings(std::string_view field) const override;
  VectorInfo GetVector(std::string_view field) const override;
  std::optional<StringList> GetStrings(std::string_view field) const override;
  std::optional<VectorInfo> GetVector(std::string_view field) const override;
  std::optional<NumsList> GetNumbers(std::string_view active_field) const override;

  // The JsonAccessor works with structured types and not plain strings, so an overload is needed
  SearchDocData Serialize(const search::Schema& schema,

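For a custom document source, the override set after this change would look roughly like the following. This is a hypothetical sketch, assuming GetStrings and Serialize are the only members a subclass must provide (GetVector and GetNumbers fall back to the GetStrings-based defaults, per the comment in the header above):

struct MyAccessor : public BaseAccessor {
  // Empty optional = wrong type; EmptyAccessResult = field absent.
  std::optional<StringList> GetStrings(std::string_view field) const override {
    return search::EmptyAccessResult<StringList>();
  }

  SearchDocData Serialize(const search::Schema& schema) const override {
    return {};  // no sortable fields in this toy accessor
  }
};
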
@@ -41,7 +41,7 @@ void TraverseAllMatching(const DocIndex& index, const OpArgs& op_args, F&& f) {
      return;

    auto accessor = GetAccessor(op_args.db_cntx, pv);
    f(key, accessor.get());
    f(key, *accessor);
  };

  PrimeTable::Cursor cursor;

@@ -146,12 +146,14 @@ ShardDocIndex::DocId ShardDocIndex::DocKeyIndex::Add(string_view key) {
  return id;
}

ShardDocIndex::DocId ShardDocIndex::DocKeyIndex::Remove(string_view key) {
  DCHECK_GT(ids_.count(key), 0u);
std::optional<ShardDocIndex::DocId> ShardDocIndex::DocKeyIndex::Remove(string_view key) {
  auto it = ids_.extract(key);
  if (!it) {
    return std::nullopt;
  }

  DocId id = ids_.find(key)->second;
  const DocId id = it.mapped();
  keys_[id] = "";
  ids_.erase(key);
  free_ids_.push_back(id);

  return id;

@@ -184,7 +186,13 @@ void ShardDocIndex::Rebuild(const OpArgs& op_args, PMR_NS::memory_resource* mr)
  key_index_ = DocKeyIndex{};
  indices_.emplace(base_->schema, base_->options, mr);

  auto cb = [this](string_view key, BaseAccessor* doc) { indices_->Add(key_index_.Add(key), doc); };
  auto cb = [this](string_view key, const BaseAccessor& doc) {
    DocId id = key_index_.Add(key);
    if (!indices_->Add(id, doc)) {
      key_index_.Remove(key);
    }
  };

  TraverseAllMatching(*base_, op_args, cb);

  VLOG(1) << "Indexed " << key_index_.Size() << " docs on " << base_->prefix;

@@ -195,7 +203,10 @@ void ShardDocIndex::AddDoc(string_view key, const DbContext& db_cntx, const PrimeValue& pv) {
    return;

  auto accessor = GetAccessor(db_cntx, pv);
  indices_->Add(key_index_.Add(key), accessor.get());
  DocId id = key_index_.Add(key);
  if (!indices_->Add(id, *accessor)) {
    key_index_.Remove(key);
  }
}

void ShardDocIndex::RemoveDoc(string_view key, const DbContext& db_cntx, const PrimeValue& pv) {

@@ -203,8 +214,10 @@ void ShardDocIndex::RemoveDoc(string_view key, const DbContext& db_cntx, const PrimeValue& pv) {
    return;

  auto accessor = GetAccessor(db_cntx, pv);
  DocId id = key_index_.Remove(key);
  indices_->Remove(id, accessor.get());
  auto id = key_index_.Remove(key);
  if (id) {
    indices_->Remove(id.value(), *accessor);
  }
}

bool ShardDocIndex::Matches(string_view key, unsigned obj_code) const {

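DocKeyIndex::Remove now relies on the map's node-handle API: extract() performs the lookup and the removal in one pass and yields an empty handle for a missing key, which maps naturally onto the new std::optional return value. A minimal sketch of that pattern with a plain std::unordered_map (names are illustrative, not the actual member types):

#include <cstdint>
#include <optional>
#include <string>
#include <unordered_map>

std::optional<uint32_t> RemoveKey(std::unordered_map<std::string, uint32_t>& ids,
                                  const std::string& key) {
  auto node = ids.extract(key);  // empty handle if `key` is absent
  if (!node)
    return std::nullopt;
  return node.mapped();  // the id, read without a second find()
}
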
@@ -133,7 +133,7 @@ class ShardDocIndex {
  // DocKeyIndex manages mapping document keys to ids and vice versa through a simple interface.
  struct DocKeyIndex {
    DocId Add(std::string_view key);
    DocId Remove(std::string_view key);
    std::optional<DocId> Remove(std::string_view key);

    std::string_view Get(DocId id) const;
    size_t Size() const;

@@ -712,7 +712,7 @@ void SearchFamily::FtList(CmdArgList args, Transaction* tx, SinkReplyBuilder* builder) {
    return OpStatus::OK;
  });
  auto* rb = static_cast<RedisReplyBuilder*>(builder);
  rb->SendStringArr(names);
  rb->SendBulkStrArr(names);
}

void SearchFamily::FtSearch(CmdArgList args, Transaction* tx, SinkReplyBuilder* builder) {

@@ -920,7 +920,7 @@ void SearchFamily::FtTagVals(CmdArgList args, Transaction* tx, SinkReplyBuilder* builder) {
  vector<string> vec(result_set.begin(), result_set.end());

  auto* rb = static_cast<RedisReplyBuilder*>(builder);
  rb->SendStringArr(vec, RedisReplyBuilder::SET);
  rb->SendBulkStrArr(vec, RedisReplyBuilder::SET);
}

void SearchFamily::FtAggregate(CmdArgList args, Transaction* tx, SinkReplyBuilder* builder) {

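The SendStringArr to SendBulkStrArr renames make explicit that every element is emitted as a RESP bulk string. A rough sketch of the wire format such a reply produces, assuming plain RESP2 encoding (the real builder streams through its own buffer rather than concatenating):

// EncodeBulkStrArr({"a", "bb"}) == "*2\r\n$1\r\na\r\n$2\r\nbb\r\n"
#include <string>
#include <vector>

std::string EncodeBulkStrArr(const std::vector<std::string>& items) {
  std::string out = "*" + std::to_string(items.size()) + "\r\n";  // array header
  for (const auto& s : items)
    out += "$" + std::to_string(s.size()) + "\r\n" + s + "\r\n";  // bulk string
  return out;
}
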
@@ -90,6 +90,13 @@ template <typename... Args> auto IsArray(Args... args) {
template <typename... Args> auto IsUnordArray(Args... args) {
  return RespArray(UnorderedElementsAre(std::forward<Args>(args)...));
}
template <typename Expected, size_t... Is>
void BuildKvMatchers(std::vector<Matcher<std::pair<std::string, RespExpr>>>& kv_matchers,
                     const Expected& expected, std::index_sequence<Is...>) {
  std::initializer_list<int>{
      (kv_matchers.emplace_back(Pair(std::get<Is * 2>(expected), std::get<Is * 2 + 1>(expected))),
       0)...};
}

MATCHER_P(IsMapMatcher, expected, "") {
  if (arg.type != RespExpr::ARRAY) {

@@ -97,73 +104,29 @@ MATCHER_P(IsMapMatcher, expected, "") {
    return false;
  }

  constexpr size_t expected_size = std::tuple_size<decltype(expected)>::value;
  constexpr size_t exprected_pairs_number = expected_size / 2;

  auto result = arg.GetVec();
  if (result.size() != expected.size()) {
  if (result.size() != expected_size) {
    *result_listener << "Wrong resp array size: " << result.size();
    return false;
  }

  using KeyValueArray = std::vector<std::pair<std::string, std::string>>;

  KeyValueArray received_pairs;
  std::vector<std::pair<std::string, RespExpr>> received_pairs;
  for (size_t i = 0; i < result.size(); i += 2) {
    received_pairs.emplace_back(result[i].GetString(), result[i + 1].GetString());
    received_pairs.emplace_back(result[i].GetString(), result[i + 1]);
  }

  KeyValueArray expected_pairs;
  for (size_t i = 0; i < expected.size(); i += 2) {
    expected_pairs.emplace_back(expected[i], expected[i + 1]);
  }
  std::vector<Matcher<std::pair<std::string, RespExpr>>> kv_matchers;
  BuildKvMatchers(kv_matchers, expected, std::make_index_sequence<exprected_pairs_number>{});

  // Custom unordered comparison
  std::sort(received_pairs.begin(), received_pairs.end());
  std::sort(expected_pairs.begin(), expected_pairs.end());

  return received_pairs == expected_pairs;
}

template <typename... Matchers> auto IsMap(Matchers... matchers) {
  return IsMapMatcher(std::vector<std::string>{std::forward<Matchers>(matchers)...});
}

MATCHER_P(IsUnordArrayWithSizeMatcher, expected, "") {
  if (arg.type != RespExpr::ARRAY) {
    *result_listener << "Wrong response type: " << arg.type;
    return false;
  }

  auto result = arg.GetVec();
  size_t expected_size = std::tuple_size<decltype(expected)>::value;
  if (result.size() != expected_size + 1) {
    *result_listener << "Wrong resp array size: " << result.size();
    return false;
  }

  if (result[0].GetInt() != expected_size) {
    *result_listener << "Wrong elements count: " << result[0].GetInt().value_or(-1);
    return false;
  }

  std::vector<RespExpr> received_elements(result.begin() + 1, result.end());

  // Create a vector of matchers from the tuple
  std::vector<Matcher<RespExpr>> matchers;
  std::apply([&matchers](auto&&... args) { ((matchers.push_back(args)), ...); }, expected);

  return ExplainMatchResult(UnorderedElementsAreArray(matchers), received_elements,
  return ExplainMatchResult(UnorderedElementsAreArray(kv_matchers), received_pairs,
                            result_listener);
}

template <typename... Matchers> auto IsUnordArrayWithSize(Matchers... matchers) {
  return IsUnordArrayWithSizeMatcher(std::make_tuple(matchers...));
}

template <typename Expected, size_t... Is>
void BuildKvMatchers(std::vector<Matcher<std::pair<std::string, RespExpr>>>& kv_matchers,
                     const Expected& expected, std::index_sequence<Is...>) {
  std::initializer_list<int>{
      (kv_matchers.emplace_back(Pair(std::get<Is * 2>(expected), std::get<Is * 2 + 1>(expected))),
       0)...};
template <typename... Args> auto IsMap(Args... args) {
  return IsMapMatcher(std::make_tuple(args...));
}

MATCHER_P(IsMapWithSizeMatcher, expected, "") {

@@ -201,6 +164,38 @@ template <typename... Args> auto IsMapWithSize(Args... args) {
  return IsMapWithSizeMatcher(std::make_tuple(args...));
}

MATCHER_P(IsUnordArrayWithSizeMatcher, expected, "") {
  if (arg.type != RespExpr::ARRAY) {
    *result_listener << "Wrong response type: " << arg.type;
    return false;
  }

  auto result = arg.GetVec();
  size_t expected_size = std::tuple_size<decltype(expected)>::value;
  if (result.size() != expected_size + 1) {
    *result_listener << "Wrong resp array size: " << result.size();
    return false;
  }

  if (result[0].GetInt() != expected_size) {
    *result_listener << "Wrong elements count: " << result[0].GetInt().value_or(-1);
    return false;
  }

  std::vector<RespExpr> received_elements(result.begin() + 1, result.end());

  // Create a vector of matchers from the tuple
  std::vector<Matcher<RespExpr>> matchers;
  std::apply([&matchers](auto&&... args) { ((matchers.push_back(args)), ...); }, expected);

  return ExplainMatchResult(UnorderedElementsAreArray(matchers), received_elements,
                            result_listener);
}

template <typename... Matchers> auto IsUnordArrayWithSize(Matchers... matchers) {
  return IsUnordArrayWithSizeMatcher(std::make_tuple(matchers...));
}

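After this refactor IsMap takes its arguments as a tuple, so map values can be arbitrary RespExpr matchers rather than plain strings; the tests below rely on exactly that. A short usage sketch drawn from them:

// Values may now be matchers, not only string literals:
EXPECT_THAT(resp, IsMap("value", "2", "another_value", ArgType(RespExpr::NIL)));
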
TEST_F(SearchFamilyTest, CreateDropListIndex) {
  EXPECT_EQ(Run({"ft.create", "idx-1", "ON", "HASH", "PREFIX", "1", "prefix-1"}), "OK");
  EXPECT_EQ(Run({"ft.create", "idx-2", "ON", "JSON", "PREFIX", "1", "prefix-2"}), "OK");

@@ -649,7 +644,7 @@ TEST_F(SearchFamilyTest, TestReturn) {

  // Check non-existing field
  resp = Run({"ft.search", "i1", "@justA:0", "return", "1", "nothere"});
  EXPECT_THAT(resp, MatchEntry("k0", "nothere", ""));
  EXPECT_THAT(resp, MatchEntry("k0"));

  // Check implicit __vector_score is provided
  float score = 20;

@@ -1194,8 +1189,8 @@ TEST_F(SearchFamilyTest, AggregateWithLoadOptionHard) {
                        IsMap("foo_total", "10", "word", "item1")));

  // Test JSON
  Run({"JSON.SET", "j1", ".", R"({"word":"item1","foo":"10","text":"first key"})"});
  Run({"JSON.SET", "j2", ".", R"({"word":"item2","foo":"20","text":"second key"})"});
  Run({"JSON.SET", "j1", ".", R"({"word":"item1","foo":10,"text":"first key"})"});
  Run({"JSON.SET", "j2", ".", R"({"word":"item2","foo":20,"text":"second key"})"});

  resp = Run({"FT.CREATE", "i2", "ON", "JSON", "SCHEMA", "$.word", "AS", "word", "TAG", "$.foo",
              "AS", "foo", "NUMERIC", "$.text", "AS", "text", "TEXT"});

@@ -1214,4 +1209,220 @@ TEST_F(SearchFamilyTest, AggregateWithLoadOptionHard) {
}
#endif

TEST_F(SearchFamilyTest, WrongFieldTypeJson) {
  // Test simple
  Run({"JSON.SET", "j1", ".", R"({"value":"one"})"});
  Run({"JSON.SET", "j2", ".", R"({"value":1})"});

  EXPECT_EQ(Run({"FT.CREATE", "i1", "ON", "JSON", "SCHEMA", "$.value", "AS", "value", "NUMERIC",
                 "SORTABLE"}),
            "OK");

  auto resp = Run({"FT.SEARCH", "i1", "*"});
  EXPECT_THAT(resp, AreDocIds("j2"));

  resp = Run({"FT.AGGREGATE", "i1", "*", "LOAD", "1", "$.value"});
  EXPECT_THAT(resp, IsUnordArrayWithSize(IsMap("$.value", "1")));

  // Test with two fields; one is load-only
  Run({"JSON.SET", "j3", ".", R"({"value":"two","another_value":1})"});
  Run({"JSON.SET", "j4", ".", R"({"value":2,"another_value":2})"});

  EXPECT_EQ(Run({"FT.CREATE", "i2", "ON", "JSON", "SCHEMA", "$.value", "AS", "value", "NUMERIC"}),
            "OK");

  resp = Run({"FT.SEARCH", "i2", "*", "LOAD", "1", "$.another_value"});
  EXPECT_THAT(
      resp, IsMapWithSize("j2", IsMap("$", R"({"value":1})"), "j4",
                          IsMap("$", R"({"another_value":2,"value":2})", "$.another_value", "2")));

  resp = Run({"FT.AGGREGATE", "i2", "*", "LOAD", "2", "$.value", "$.another_value", "GROUPBY", "2",
              "$.value", "$.another_value", "REDUCE", "COUNT", "0", "AS", "count"});
  EXPECT_THAT(resp,
              IsUnordArrayWithSize(
                  IsMap("$.value", "1", "$.another_value", ArgType(RespExpr::NIL), "count", "1"),
                  IsMap("$.value", "2", "$.another_value", "2", "count", "1")));

  // Test multiple field values
  Run({"JSON.SET", "j5", ".", R"({"arr":[{"id":1},{"id":"two"}]})"});
  Run({"JSON.SET", "j6", ".", R"({"arr":[{"id":1},{"id":2}]})"});
  Run({"JSON.SET", "j7", ".", R"({"arr":[]})"});

  resp = Run({"FT.CREATE", "i3", "ON", "JSON", "SCHEMA", "$.arr[*].id", "AS", "id", "NUMERIC"});
  EXPECT_EQ(resp, "OK");

  resp = Run({"FT.SEARCH", "i3", "*"});
  EXPECT_THAT(resp, AreDocIds("j1", "j2", "j3", "j4", "j6", "j7"));  // Only j5 fails

  resp = Run({"FT.CREATE", "i4", "ON", "JSON", "SCHEMA", "$.arr[*].id", "AS", "id", "NUMERIC",
              "SORTABLE"});
  EXPECT_EQ(resp, "OK");

  resp = Run({"FT.SEARCH", "i4", "*"});
  EXPECT_THAT(resp, AreDocIds("j1", "j2", "j3", "j4", "j6", "j7"));  // Only j5 fails
}

TEST_F(SearchFamilyTest, WrongFieldTypeHash) {
  // Test simple
  Run({"HSET", "h1", "value", "one"});
  Run({"HSET", "h2", "value", "1"});

  EXPECT_EQ(Run({"FT.CREATE", "i1", "ON", "HASH", "SCHEMA", "value", "NUMERIC", "SORTABLE"}), "OK");

  auto resp = Run({"FT.SEARCH", "i1", "*"});
  EXPECT_THAT(resp, IsMapWithSize("h2", IsMap("value", "1")));

  resp = Run({"FT.AGGREGATE", "i1", "*", "LOAD", "1", "@value"});
  EXPECT_THAT(resp, IsUnordArrayWithSize(IsMap("value", "1")));

  // Test with two fields; one is load-only
  Run({"HSET", "h3", "value", "two", "another_value", "1"});
  Run({"HSET", "h4", "value", "2", "another_value", "2"});

  EXPECT_EQ(Run({"FT.CREATE", "i2", "ON", "HASH", "SCHEMA", "value", "NUMERIC"}), "OK");

  resp = Run({"FT.SEARCH", "i2", "*", "LOAD", "1", "@another_value"});
  EXPECT_THAT(resp, IsMapWithSize("h2", IsMap("value", "1"), "h4",
                                  IsMap("value", "2", "another_value", "2")));

  resp = Run({"FT.AGGREGATE", "i2", "*", "LOAD", "2", "@value", "@another_value", "GROUPBY", "2",
              "@value", "@another_value", "REDUCE", "COUNT", "0", "AS", "count"});
  EXPECT_THAT(resp, IsUnordArrayWithSize(
                        IsMap("value", "1", "another_value", ArgType(RespExpr::NIL), "count", "1"),
                        IsMap("value", "2", "another_value", "2", "count", "1")));
}

TEST_F(SearchFamilyTest, WrongFieldTypeHardJson) {
  Run({"JSON.SET", "j1", ".", R"({"data":1,"name":"doc_with_int"})"});
  Run({"JSON.SET", "j2", ".", R"({"data":"1","name":"doc_with_int_as_string"})"});
  Run({"JSON.SET", "j3", ".", R"({"data":"string","name":"doc_with_string"})"});
  Run({"JSON.SET", "j4", ".", R"({"name":"no_data"})"});
  Run({"JSON.SET", "j5", ".", R"({"data":[5,4,3],"name":"doc_with_vector"})"});
  Run({"JSON.SET", "j6", ".", R"({"data":"[5,4,3]","name":"doc_with_vector_as_string"})"});

  auto resp = Run({"FT.CREATE", "i1", "ON", "JSON", "SCHEMA", "$.data", "AS", "data", "NUMERIC"});
  EXPECT_EQ(resp, "OK");

  resp = Run(
      {"FT.CREATE", "i2", "ON", "JSON", "SCHEMA", "$.data", "AS", "data", "NUMERIC", "SORTABLE"});
  EXPECT_EQ(resp, "OK");

  resp = Run({"FT.CREATE", "i3", "ON", "JSON", "SCHEMA", "$.data", "AS", "data", "TAG"});
  EXPECT_EQ(resp, "OK");

  resp =
      Run({"FT.CREATE", "i4", "ON", "JSON", "SCHEMA", "$.data", "AS", "data", "TAG", "SORTABLE"});
  EXPECT_EQ(resp, "OK");

  resp = Run({"FT.CREATE", "i5", "ON", "JSON", "SCHEMA", "$.data", "AS", "data", "TEXT"});
  EXPECT_EQ(resp, "OK");

  resp =
      Run({"FT.CREATE", "i6", "ON", "JSON", "SCHEMA", "$.data", "AS", "data", "TEXT", "SORTABLE"});
  EXPECT_EQ(resp, "OK");

  resp = Run({"FT.CREATE", "i7", "ON", "JSON", "SCHEMA", "$.data", "AS", "data", "VECTOR", "FLAT",
              "6", "TYPE", "FLOAT32", "DIM", "3", "DISTANCE_METRIC", "L2"});
  EXPECT_EQ(resp, "OK");

  resp = Run({"FT.SEARCH", "i1", "*"});
  EXPECT_THAT(resp, AreDocIds("j1", "j4", "j5"));

  resp = Run({"FT.SEARCH", "i2", "*"});
  EXPECT_THAT(resp, AreDocIds("j1", "j4", "j5"));

  resp = Run({"FT.SEARCH", "i3", "*"});
  EXPECT_THAT(resp, AreDocIds("j2", "j3", "j6", "j4"));

  resp = Run({"FT.SEARCH", "i4", "*"});
  EXPECT_THAT(resp, AreDocIds("j2", "j3", "j6", "j4"));

  resp = Run({"FT.SEARCH", "i5", "*"});
  EXPECT_THAT(resp, AreDocIds("j4", "j2", "j3", "j6"));

  resp = Run({"FT.SEARCH", "i6", "*"});
  EXPECT_THAT(resp, AreDocIds("j4", "j2", "j3", "j6"));

  resp = Run({"FT.SEARCH", "i7", "*"});
  EXPECT_THAT(resp, AreDocIds("j4", "j5"));
}

TEST_F(SearchFamilyTest, WrongFieldTypeHardHash) {
  Run({"HSET", "j1", "data", "1", "name", "doc_with_int"});
  Run({"HSET", "j2", "data", "1", "name", "doc_with_int_as_string"});
  Run({"HSET", "j3", "data", "string", "name", "doc_with_string"});
  Run({"HSET", "j4", "name", "no_data"});
  Run({"HSET", "j5", "data", "5,4,3", "name", "doc_with_fake_vector"});
  Run({"HSET", "j6", "data", "[5,4,3]", "name", "doc_with_fake_vector_as_string"});

  // Vector [1, 2, 3]
  std::string vector = std::string("\x3f\x80\x00\x00\x40\x00\x00\x00\x40\x40\x00\x00", 12);
  Run({"HSET", "j7", "data", vector, "name", "doc_with_vector [1, 2, 3]"});

  auto resp = Run({"FT.CREATE", "i1", "ON", "HASH", "SCHEMA", "data", "NUMERIC"});
  EXPECT_EQ(resp, "OK");

  resp = Run({"FT.CREATE", "i2", "ON", "HASH", "SCHEMA", "data", "NUMERIC", "SORTABLE"});
  EXPECT_EQ(resp, "OK");

  resp = Run({"FT.CREATE", "i3", "ON", "HASH", "SCHEMA", "data", "TAG"});
  EXPECT_EQ(resp, "OK");

  resp = Run({"FT.CREATE", "i4", "ON", "HASH", "SCHEMA", "data", "TAG", "SORTABLE"});
  EXPECT_EQ(resp, "OK");

  resp = Run({"FT.CREATE", "i5", "ON", "HASH", "SCHEMA", "data", "TEXT"});
  EXPECT_EQ(resp, "OK");

  resp = Run({"FT.CREATE", "i6", "ON", "HASH", "SCHEMA", "data", "TEXT", "SORTABLE"});
  EXPECT_EQ(resp, "OK");

  resp = Run({"FT.CREATE", "i7", "ON", "HASH", "SCHEMA", "data", "VECTOR", "FLAT", "6", "TYPE",
              "FLOAT32", "DIM", "3", "DISTANCE_METRIC", "L2"});
  EXPECT_EQ(resp, "OK");

  resp = Run({"FT.SEARCH", "i1", "*"});
  EXPECT_THAT(resp, AreDocIds("j2", "j1", "j4"));

  resp = Run({"FT.SEARCH", "i2", "*"});
  EXPECT_THAT(resp, AreDocIds("j2", "j1", "j4"));

  resp = Run({"FT.SEARCH", "i3", "*"});
  EXPECT_THAT(resp, AreDocIds("j2", "j7", "j3", "j6", "j1", "j4", "j5"));

  resp = Run({"FT.SEARCH", "i4", "*"});
  EXPECT_THAT(resp, AreDocIds("j2", "j7", "j3", "j6", "j1", "j4", "j5"));

  resp = Run({"FT.SEARCH", "i5", "*"});
  EXPECT_THAT(resp, AreDocIds("j4", "j2", "j7", "j3", "j6", "j1", "j5"));

  resp = Run({"FT.SEARCH", "i6", "*"});
  EXPECT_THAT(resp, AreDocIds("j4", "j2", "j7", "j3", "j6", "j1", "j5"));

  resp = Run({"FT.SEARCH", "i7", "*"});
  EXPECT_THAT(resp, AreDocIds("j4", "j7"));
}

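The 12-byte literal in WrongFieldTypeHardHash spells the IEEE-754 bit patterns of 1.0f (0x3F800000), 2.0f (0x40000000) and 3.0f (0x40400000). A sketch of building such a packed FLOAT32 blob programmatically; note the test literal writes each float most-significant byte first, while memcpy emits host (typically little-endian) order, so the exact byte order the server expects is an assumption here:

#include <cstring>
#include <string>

std::string MakeVectorBlob() {
  const float v[3] = {1.0f, 2.0f, 3.0f};
  std::string blob(sizeof(v), '\0');
  std::memcpy(blob.data(), v, sizeof(v));  // 12 bytes of packed float32
  return blob;
}
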
TEST_F(SearchFamilyTest, WrongVectorFieldType) {
  Run({"JSON.SET", "j1", ".",
       R"({"vector_field": [0.1, 0.2, 0.3], "name": "doc_with_correct_dim"})"});
  Run({"JSON.SET", "j2", ".", R"({"vector_field": [0.1, 0.2], "name": "doc_with_small_dim"})"});
  Run({"JSON.SET", "j3", ".",
       R"({"vector_field": [0.1, 0.2, 0.3, 0.4], "name": "doc_with_large_dim"})"});
  Run({"JSON.SET", "j4", ".", R"({"vector_field": [1, 2, 3], "name": "doc_with_int_values"})"});
  Run({"JSON.SET", "j5", ".",
       R"({"vector_field":"not_vector", "name":"doc_with_incorrect_field_type"})"});
  Run({"JSON.SET", "j6", ".", R"({"name":"doc_with_no_field"})"});
  Run({"JSON.SET", "j7", ".",
       R"({"vector_field": [999999999999999999999999999999999999999, -999999999999999999999999999999999999999, 500000000000000000000000000000000000000], "name": "doc_with_out_of_range_values"})"});

  auto resp =
      Run({"FT.CREATE", "index", "ON", "JSON", "SCHEMA", "$.vector_field", "AS", "vector_field",
           "VECTOR", "FLAT", "6", "TYPE", "FLOAT32", "DIM", "3", "DISTANCE_METRIC", "L2"});
  EXPECT_EQ(resp, "OK");

  resp = Run({"FT.SEARCH", "index", "*"});
  EXPECT_THAT(resp, AreDocIds("j6", "j7", "j1", "j4"));
}

}  // namespace dfly

@@ -1533,7 +1533,7 @@ void ServerFamily::ConfigureMetrics(util::HttpListenerBase* http_base) {

  auto cb = [this](const util::http::QueryArgs& args, util::HttpContext* send) {
    StringResponse resp = util::http::MakeStringResponse(boost::beast::http::status::ok);
    PrintPrometheusMetrics(this->GetMetrics(&namespaces.GetDefaultNamespace()),
    PrintPrometheusMetrics(this->GetMetrics(&namespaces->GetDefaultNamespace()),
                           this->dfly_cmd_.get(), &resp);

    return send->Invoke(std::move(resp));

@@ -1608,7 +1608,7 @@ void ServerFamily::StatsMC(std::string_view section, SinkReplyBuilder* builder)
  double utime = dbl_time(ru.ru_utime);
  double systime = dbl_time(ru.ru_stime);

  Metrics m = GetMetrics(&namespaces.GetDefaultNamespace());
  Metrics m = GetMetrics(&namespaces->GetDefaultNamespace());

  ADD_LINE(pid, getpid());
  ADD_LINE(uptime, m.uptime);

@@ -1638,7 +1638,7 @@ GenericError ServerFamily::DoSave(bool ignore_state) {
  const CommandId* cid = service().FindCmd("SAVE");
  CHECK_NOTNULL(cid);
  boost::intrusive_ptr<Transaction> trans(new Transaction{cid});
  trans->InitByArgs(&namespaces.GetDefaultNamespace(), 0, {});
  trans->InitByArgs(&namespaces->GetDefaultNamespace(), 0, {});
  return DoSave(absl::GetFlag(FLAGS_df_snapshot_format), {}, trans.get(), ignore_state);
}

@@ -1826,7 +1826,7 @@ bool ServerFamily::DoAuth(ConnectionContext* cntx, std::string_view username,
    cntx->acl_commands = cred.acl_commands;
    cntx->keys = std::move(cred.keys);
    cntx->pub_sub = std::move(cred.pub_sub);
    cntx->ns = &namespaces.GetOrInsert(cred.ns);
    cntx->ns = &namespaces->GetOrInsert(cred.ns);
    cntx->authenticated = true;
  }
  return is_authorized;

@@ -1968,7 +1968,7 @@ void ServerFamily::Config(CmdArgList args, Transaction* tx, SinkReplyBuilder* builder
      }
    }
    auto* rb = static_cast<RedisReplyBuilder*>(builder);
    return rb->SendStringArr(res, RedisReplyBuilder::MAP);
    return rb->SendBulkStrArr(res, RedisReplyBuilder::MAP);
  }

  if (sub_cmd == "RESETSTAT") {

@@ -3008,7 +3008,7 @@ void ServerFamily::Role(CmdArgList args, Transaction* tx, SinkReplyBuilder* builder

void ServerFamily::Script(CmdArgList args, Transaction* tx, SinkReplyBuilder* builder,
                          ConnectionContext* cntx) {
  script_mgr_->Run(std::move(args), tx, builder);
  script_mgr_->Run(std::move(args), tx, builder, cntx);
}

void ServerFamily::LastSave(CmdArgList args, Transaction* tx, SinkReplyBuilder* builder,

@@ -969,7 +969,7 @@ struct SetReplies {
    if (script)  // output is sorted under scripts
      sort(sv->begin(), sv->end());

    rb->SendStringArr(*sv, RedisReplyBuilder::SET);
    rb->SendBulkStrArr(*sv, RedisReplyBuilder::SET);
  }

  void Send(const ResultSetView& rsv) {

@@ -1122,7 +1122,7 @@ void SPop(CmdArgList args, Transaction* tx, SinkReplyBuilder* builder) {
      rb->SendBulkString(result.value().front());
    }
  } else {  // SPOP key cnt
    rb->SendStringArr(*result, RedisReplyBuilder::SET);
    rb->SendBulkStrArr(*result, RedisReplyBuilder::SET);
  }
  return;
}

@@ -1240,7 +1240,7 @@ void SRandMember(CmdArgList args, Transaction* tx, SinkReplyBuilder* builder) {
  auto* rb = static_cast<RedisReplyBuilder*>(builder);
  if (result || result == OpStatus::KEY_NOTFOUND) {
    if (is_count) {
      rb->SendStringArr(*result, RedisReplyBuilder::SET);
      rb->SendBulkStrArr(*result, RedisReplyBuilder::SET);
    } else if (result->size()) {
      rb->SendBulkString(result->front());
    } else {

@@ -518,17 +518,32 @@ OpResult<array<int64_t, 5>> OpThrottle(const OpArgs& op_args, const string_view
  return array<int64_t, 5>{limited ? 1 : 0, limit, remaining, retry_after_ms, reset_after_ms};
}

struct GetResp {
  string key;  // TODO: to use backing storage to optimize this as well.
  string_view value;
  uint64_t mc_ver = 0;  // 0 means we do not output it (i.e. it has not been requested).
  uint32_t mc_flag = 0;
};

struct MGetResponse {
  explicit MGetResponse(size_t size = 0) : resp_arr(size) {
  }

  std::unique_ptr<char[]> storage;
  absl::InlinedVector<std::optional<GetResp>, 2> resp_arr;
};

// fetch_mask values
constexpr uint8_t FETCH_MCFLAG = 0x1;
constexpr uint8_t FETCH_MCVER = 0x2;
SinkReplyBuilder::MGetResponse OpMGet(util::fb2::BlockingCounter wait_bc, uint8_t fetch_mask,
                                      const Transaction* t, EngineShard* shard) {
MGetResponse OpMGet(util::fb2::BlockingCounter wait_bc, uint8_t fetch_mask, const Transaction* t,
                    EngineShard* shard) {
  ShardArgs keys = t->GetShardArgs(shard->shard_id());
  DCHECK(!keys.Empty());

  auto& db_slice = t->GetDbSlice(shard->shard_id());

  SinkReplyBuilder::MGetResponse response(keys.Size());
  MGetResponse response(keys.Size());
  absl::InlinedVector<DbSlice::ConstIterator, 32> iters(keys.Size());

  // First, fetch all iterators and count total size ahead

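The new MGetResponse replaces the builder-owned storage list with a single flat allocation per shard; values are carved out of it with a bump pointer, as the hunk below shows. A minimal, self-contained sketch of that layout (names are illustrative):

#include <cstring>
#include <memory>
#include <string_view>
#include <vector>

struct FlatValues {
  std::unique_ptr<char[]> storage;           // one allocation owns all bytes
  std::vector<std::string_view> views;       // views point into `storage`
};

FlatValues Pack(const std::vector<std::string_view>& values) {
  size_t total = 0;
  for (auto v : values)
    total += v.size();

  FlatValues out;
  out.storage = std::make_unique<char[]>(total);
  char* next = out.storage.get();            // bump pointer
  for (auto v : values) {
    std::memcpy(next, v.data(), v.size());
    out.views.emplace_back(next, v.size());
    next += v.size();
  }
  return out;
}
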
@@ -543,8 +558,8 @@ SinkReplyBuilder::MGetResponse OpMGet(util::fb2::BlockingCounter wait_bc, uint8_t fetch_mask,
  }

  // Allocate enough for all values
  response.storage_list = SinkReplyBuilder::AllocMGetStorage(total_size);
  char* next = response.storage_list->data;
  response.storage = make_unique<char[]>(total_size);
  char* next = response.storage.get();
  bool fetch_mcflag = fetch_mask & FETCH_MCFLAG;
  bool fetch_mcver = fetch_mask & FETCH_MCVER;
  for (size_t i = 0; i < iters.size(); ++i) {

@@ -650,7 +665,7 @@ void ExtendGeneric(CmdArgList args, bool prepend, Transaction* tx, SinkReplyBuilder* builder) {
  string_view value = ArgS(args, 1);
  VLOG(2) << "ExtendGeneric(" << key << ", " << value << ")";

  if (builder->type() == SinkReplyBuilder::REDIS) {
  if (builder->GetProtocol() == Protocol::REDIS) {
    auto cb = [&](Transaction* t, EngineShard* shard) {
      return OpExtend(t->GetOpArgs(shard), key, value, prepend);
    };

@@ -662,7 +677,7 @@ void ExtendGeneric(CmdArgList args, bool prepend, Transaction* tx, SinkReplyBuilder* builder) {
    rb->SendLong(GetResult(std::move(res.value())));
  } else {
    // Memcached skips if the key is missing
    DCHECK(builder->type() == SinkReplyBuilder::MC);
    DCHECK(builder->GetProtocol() == Protocol::MEMCACHE);

    auto cb = [&](Transaction* t, EngineShard* shard) {
      return ExtendOrSkip(t->GetOpArgs(shard), key, value, prepend);

@@ -723,7 +738,7 @@ void SetExGeneric(bool seconds, CmdArgList args, const CommandId* cid, Transaction* tx,
}

void IncrByGeneric(string_view key, int64_t val, Transaction* tx, SinkReplyBuilder* builder) {
  bool skip_on_missing = builder->type() == SinkReplyBuilder::MC;
  bool skip_on_missing = (builder->GetProtocol() == Protocol::MEMCACHE);

  auto cb = [&](Transaction* t, EngineShard* shard) {
    OpResult<int64_t> res = OpIncrBy(t->GetOpArgs(shard), key, val, skip_on_missing);

@@ -974,7 +989,7 @@ void StringFamily::Set(CmdArgList args, Transaction* tx, SinkReplyBuilder* builder,

  // Remove the existing key if it has already expired
  if (rel_ms < 0) {
    tx->ScheduleSingleHop([key](const Transaction* tx, EngineShard* es) {
    tx->ScheduleSingleHop([](const Transaction* tx, EngineShard* es) {
      ShardArgs args = tx->GetShardArgs(es->shard_id());
      GenericFamily::OpDel(tx->GetOpArgs(es), args);
      return OpStatus::OK;

@@ -1253,10 +1268,9 @@ void StringFamily::DecrBy(CmdArgList args, Transaction* tx, SinkReplyBuilder* builder,
void StringFamily::MGet(CmdArgList args, Transaction* tx, SinkReplyBuilder* builder,
                        ConnectionContext* cntx) {
  DCHECK_GE(args.size(), 1U);
  std::vector<SinkReplyBuilder::MGetResponse> mget_resp(shard_set->size());

  uint8_t fetch_mask = 0;
  if (builder->type() == SinkReplyBuilder::MC) {
  if (builder->GetProtocol() == Protocol::MEMCACHE) {
    fetch_mask |= FETCH_MCFLAG;
    if (cntx->conn_state.memcache_flag & ConnectionState::FETCH_CAS_VER)
      fetch_mask |= FETCH_MCVER;

@@ -1264,6 +1278,7 @@ void StringFamily::MGet(CmdArgList args, Transaction* tx, SinkReplyBuilder* builder,

  // Count of pending tiered reads
  util::fb2::BlockingCounter tiering_bc{0};
  std::vector<MGetResponse> mget_resp(shard_set->size());
  auto cb = [&](Transaction* t, EngineShard* shard) {
    mget_resp[shard->shard_id()] = OpMGet(tiering_bc, fetch_mask, t, shard);
    return OpStatus::OK;

@@ -1275,18 +1290,14 @@ void StringFamily::MGet(CmdArgList args, Transaction* tx, SinkReplyBuilder* builder,
  // wait for all tiered reads to finish
  tiering_bc->Wait();

  // reorder the responses back according to the order of their corresponding
  // keys.
  SinkReplyBuilder::MGetResponse res(args.size());
  // reorder shard results back according to argument order
  absl::FixedArray<optional<GetResp>, 8> res(args.size());

  for (ShardId sid = 0; sid < mget_resp.size(); ++sid) {
    if (!tx->IsActive(sid))
      continue;

    SinkReplyBuilder::MGetResponse& src = mget_resp[sid];
    src.storage_list->next = res.storage_list;
    res.storage_list = src.storage_list;
    src.storage_list = nullptr;
    auto& src = mget_resp[sid];
    ShardArgs shard_args = tx->GetShardArgs(sid);
    unsigned src_indx = 0;
    for (auto it = shard_args.begin(); it != shard_args.end(); ++it, ++src_indx) {

@@ -1295,14 +1306,32 @@ void StringFamily::MGet(CmdArgList args, Transaction* tx, SinkReplyBuilder* builder,

      uint32_t indx = it.index();

      res.resp_arr[indx] = std::move(src.resp_arr[src_indx]);
      if (builder->type() == SinkReplyBuilder::MC) {
        res.resp_arr[indx]->key = *it;
      res[indx] = std::move(src.resp_arr[src_indx]);
      if (builder->GetProtocol() == Protocol::MEMCACHE) {
        res[indx]->key = *it;
      }
    }
  }

  return builder->SendMGetResponse(std::move(res));
  SinkReplyBuilder::ReplyScope scope(builder);
  if (builder->GetProtocol() == Protocol::MEMCACHE) {
    auto* rb = static_cast<MCReplyBuilder*>(builder);
    for (const auto& entry : res) {
      if (!entry)
        continue;
      rb->SendValue(entry->key, entry->value, entry->mc_ver, entry->mc_flag);
    }
    rb->SendSimpleString("END");
  } else {
    auto* rb = static_cast<RedisReplyBuilder*>(builder);
    rb->StartArray(res.size());
    for (const auto& entry : res) {
      if (entry)
        rb->SendBulkString(entry->value);
      else
        rb->SendNull();
    }
  }
}

void StringFamily::MSet(CmdArgList args, Transaction* tx, SinkReplyBuilder* builder,

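For reference, the two branches at the end of MGet above emit differently shaped replies; a hedged sketch of the wire output for MGET k1 k2 where only k1 exists with value "v1", assuming standard RESP2 and memcache text-protocol framing:

// RESP branch:      *2\r\n$2\r\nv1\r\n$-1\r\n             (bulk string, then null)
// memcache branch:  VALUE k1 <flags> 2\r\nv1\r\nEND\r\n   (missing keys are simply omitted)
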
@@ -1559,7 +1588,7 @@ void StringFamily::Register(CommandRegistry* registry) {
      << CI{"SUBSTR", CO::READONLY, 4, 1, 1}.HFUNC(GetRange)  // Alias for GetRange
      << CI{"SETRANGE", CO::WRITE | CO::DENYOOM, 4, 1, 1}.HFUNC(SetRange)
      << CI{"CL.THROTTLE", CO::WRITE | CO::DENYOOM | CO::FAST, -5, 1, 1, acl::THROTTLE}.HFUNC(
          ClThrottle);
             ClThrottle);
}

}  // namespace dfly

@@ -59,7 +59,7 @@ static vector<string> SplitLines(const std::string& src) {

TestConnection::TestConnection(Protocol protocol, io::StringSink* sink)
    : facade::Connection(protocol, nullptr, nullptr, nullptr), sink_(sink) {
  cc_.reset(new dfly::ConnectionContext(sink_, this, {}));
  cc_.reset(new dfly::ConnectionContext(this, {}));
  cc_->skip_acl_validation = true;
  SetSocket(ProactorBase::me()->CreateSocket());
  OnConnectionStart();

@@ -82,7 +82,7 @@ void TransactionSuspension::Start() {

  transaction_ = new dfly::Transaction{&cid};

  auto st = transaction_->InitByArgs(&namespaces.GetDefaultNamespace(), 0, {});
  auto st = transaction_->InitByArgs(&namespaces->GetDefaultNamespace(), 0, {});
  CHECK_EQ(st, OpStatus::OK);

  transaction_->Execute([](Transaction* t, EngineShard* shard) { return OpStatus::OK; }, false);

@@ -109,7 +109,7 @@ class BaseFamilyTest::TestConnWrapper {

  ConnectionContext* cmd_cntx() {
    auto cntx = static_cast<ConnectionContext*>(dummy_conn_->cntx());
    cntx->ns = &namespaces.GetDefaultNamespace();
    cntx->ns = &namespaces->GetDefaultNamespace();
    return cntx;
  }

@@ -125,6 +125,10 @@ class BaseFamilyTest::TestConnWrapper {
    return dummy_conn_.get();
  }

  SinkReplyBuilder* builder() {
    return builder_.get();
  }

 private:
  ::io::StringSink sink_;  // holds the response blob

@@ -133,10 +137,19 @@ class BaseFamilyTest::TestConnWrapper {
  std::vector<std::unique_ptr<std::string>> tmp_str_vec_;

  std::unique_ptr<RedisParser> parser_;
  std::unique_ptr<SinkReplyBuilder> builder_;
};

BaseFamilyTest::TestConnWrapper::TestConnWrapper(Protocol proto)
    : dummy_conn_(new TestConnection(proto, &sink_)) {
  switch (proto) {
    case Protocol::REDIS:
      builder_.reset(new RedisReplyBuilder{&sink_});
      break;
    case Protocol::MEMCACHE:
      builder_.reset(new MCReplyBuilder{&sink_});
      break;
  }
}

BaseFamilyTest::TestConnWrapper::~TestConnWrapper() {

@@ -213,7 +226,7 @@ void BaseFamilyTest::ResetService() {
  used_mem_current = 0;

  TEST_current_time_ms = absl::GetCurrentTimeNanos() / 1000000;
  auto default_ns = &namespaces.GetDefaultNamespace();
  auto default_ns = &namespaces->GetDefaultNamespace();
  auto cb = [&](EngineShard* s) {
    default_ns->GetDbSlice(s->shard_id()).UpdateExpireBase(TEST_current_time_ms - 1000, 0);
  };

@@ -250,7 +263,7 @@ void BaseFamilyTest::ResetService() {
        }

        LOG(ERROR) << "TxLocks for shard " << es->shard_id();
        for (const auto& k_v : namespaces.GetDefaultNamespace()
        for (const auto& k_v : namespaces->GetDefaultNamespace()
                                   .GetDbSlice(es->shard_id())
                                   .GetDBTable(0)
                                   ->trans_locks) {

@@ -305,7 +318,7 @@ void BaseFamilyTest::CleanupSnapshots() {

unsigned BaseFamilyTest::NumLocked() {
  atomic_uint count = 0;
  auto default_ns = &namespaces.GetDefaultNamespace();
  auto default_ns = &namespaces->GetDefaultNamespace();
  shard_set->RunBriefInParallel([&](EngineShard* shard) {
    for (const auto& db : default_ns->GetDbSlice(shard->shard_id()).databases()) {
      if (db == nullptr) {

@@ -386,11 +399,11 @@ RespExpr BaseFamilyTest::Run(std::string_view id, ArgSlice slice) {
  CmdArgVec args = conn_wrapper->Args(slice);

  auto* context = conn_wrapper->cmd_cntx();
  context->ns = &namespaces.GetDefaultNamespace();
  context->ns = &namespaces->GetDefaultNamespace();

  DCHECK(context->transaction == nullptr) << id;

  service_->DispatchCommand(CmdArgList{args}, context->reply_builder_old(), context);
  service_->DispatchCommand(CmdArgList{args}, conn_wrapper->builder(), context);

  DCHECK(context->transaction == nullptr);


@@ -433,8 +446,7 @@ auto BaseFamilyTest::RunMC(MP::CmdType cmd_type, string_view key, string_view value,

  DCHECK(context->transaction == nullptr);

  service_->DispatchMC(cmd, value, static_cast<MCReplyBuilder*>(context->reply_builder_old()),
                       context);
  service_->DispatchMC(cmd, value, static_cast<MCReplyBuilder*>(conn->builder()), context);

  DCHECK(context->transaction == nullptr);

@@ -446,17 +458,7 @@ auto BaseFamilyTest::RunMC(MP::CmdType cmd_type, std::string_view key) -> MCResponse {
    return pp_->at(0)->Await([&] { return this->RunMC(cmd_type, key); });
  }

  MP::Command cmd;
  cmd.type = cmd_type;
  cmd.key = key;
  TestConnWrapper* conn = AddFindConn(Protocol::MEMCACHE, GetId());

  auto* context = conn->cmd_cntx();

  service_->DispatchMC(cmd, string_view{},
                       static_cast<MCReplyBuilder*>(context->reply_builder_old()), context);

  return conn->SplitLines();
  return RunMC(cmd_type, key, string_view{}, 0, chrono::seconds{});
}

auto BaseFamilyTest::GetMC(MP::CmdType cmd_type, std::initializer_list<std::string_view> list)

@@ -479,9 +481,7 @@ auto BaseFamilyTest::GetMC(MP::CmdType cmd_type, std::initializer_list<std::string_view> list)
  TestConnWrapper* conn = AddFindConn(Protocol::MEMCACHE, GetId());

  auto* context = conn->cmd_cntx();

  service_->DispatchMC(cmd, string_view{},
                       static_cast<MCReplyBuilder*>(context->reply_builder_old()), context);
  service_->DispatchMC(cmd, string_view{}, static_cast<MCReplyBuilder*>(conn->builder()), context);

  return conn->SplitLines();
}

@@ -566,7 +566,7 @@ BaseFamilyTest::TestConnWrapper::GetInvalidationMessage(size_t index) const {
}

bool BaseFamilyTest::IsLocked(DbIndex db_index, std::string_view key) const {
  return service_->IsLocked(&namespaces.GetDefaultNamespace(), db_index, key);
  return service_->IsLocked(&namespaces->GetDefaultNamespace(), db_index, key);
}

string BaseFamilyTest::GetId() const {

@@ -654,7 +654,7 @@ vector<LockFp> BaseFamilyTest::GetLastFps() {

    lock_guard lk(mu);
    for (auto fp :
         namespaces.GetDefaultNamespace().GetDbSlice(shard->shard_id()).TEST_GetLastLockedFps()) {
         namespaces->GetDefaultNamespace().GetDbSlice(shard->shard_id()).TEST_GetLastLockedFps()) {
      result.push_back(fp);
    }
  };

@@ -117,7 +117,7 @@ class BaseFamilyTest : public ::testing::Test {
  static std::vector<std::string> StrArray(const RespExpr& expr);

  Metrics GetMetrics() const {
    return service_->server_family().GetMetrics(&namespaces.GetDefaultNamespace());
    return service_->server_family().GetMetrics(&namespaces->GetDefaultNamespace());
  }

  void ClearMetrics();

@@ -399,10 +399,12 @@ OpStatus Transaction::InitByArgs(Namespace* ns, DbIndex index, CmdArgList args)
  }

  if ((cid_->opt_mask() & CO::NO_KEY_TRANSACTIONAL) > 0) {
    if ((cid_->opt_mask() & CO::NO_KEY_TX_SPAN_ALL) > 0)
    if (((cid_->opt_mask() & CO::NO_KEY_TX_SPAN_ALL) > 0)) {
      EnableAllShards();
    else
    } else {
      EnableShard(0);
    }

    return OpStatus::OK;
  }

@@ -976,7 +978,7 @@ string Transaction::DEBUG_PrintFailState(ShardId sid) const {
void Transaction::EnableShard(ShardId sid) {
  unique_shard_cnt_ = 1;
  unique_shard_id_ = sid;
  shard_data_.resize(1);
  shard_data_.resize(IsActiveMulti() ? shard_set->size() : 1);
  shard_data_.front().local_mask |= ACTIVE;
}

|
@ -57,28 +57,29 @@ async def test_noreply_pipeline(df_server: DflyInstance, memcached_client: MCCli
|
|||
so all the commands are pipelined. Assert pipelines work correctly and the
|
||||
succeeding regular command receives a reply (it should join the pipeline as last).
|
||||
"""
|
||||
keys = [f"k{i}" for i in range(2000)]
|
||||
values = [f"d{i}" for i in range(len(keys))]
|
||||
|
||||
for k, v in zip(keys, values):
|
||||
memcached_client.set(k, v, noreply=True)
|
||||
|
||||
# quick follow up before the pipeline finishes
|
||||
assert memcached_client.get("k10") == b"d10"
|
||||
# check all commands were executed
|
||||
assert memcached_client.get_many(keys) == {k: v.encode() for k, v in zip(keys, values)}
|
||||
|
||||
client = df_server.client()
|
||||
info = await client.info()
|
||||
if info["total_pipelined_commands"] == 0:
|
||||
logging.error("No pipelined commands were detected. Info: \n" + str(info))
|
||||
for attempts in range(2):
|
||||
keys = [f"k{i}" for i in range(1000)]
|
||||
values = [f"d{i}" for i in range(len(keys))]
|
||||
|
||||
# Try again
|
||||
for k, v in zip(keys, values):
|
||||
memcached_client.set(k, v, noreply=True)
|
||||
|
||||
# quick follow up before the pipeline finishes
|
||||
assert memcached_client.get("k10") == b"d10"
|
||||
# check all commands were executed
|
||||
assert memcached_client.get_many(keys) == {k: v.encode() for k, v in zip(keys, values)}
|
||||
|
||||
info = await client.info()
|
||||
logging.error("Second Info: \n" + str(info))
|
||||
assert False
|
||||
if info["total_pipelined_commands"] > 100:
|
||||
return
|
||||
logging.warning(
|
||||
f"Have not identified pipelining at attempt {attempts} Info: \n" + str(info)
|
||||
)
|
||||
await client.flushall()
|
||||
|
||||
assert False, "Pipelining not detected"
|
||||
|
||||
|
||||
@dfly_args(DEFAULT_ARGS)
|
||||
|
|