Mirror of https://github.com/kyverno/kyverno.git (synced 2025-03-05 07:26:55 +00:00)
fix: allow dropping metrics, labels and configuring histogram bucket boundaries to avoid high cardinality. (#8569) (#8629)
Signed-off-by: Rodrigo Fior Kuntzer <rodrigo@miro.com>
Co-authored-by: Rodrigo Fior Kuntzer <rodrigo@miro.com>
Co-authored-by: shuting <shuting@nirmata.com>
This commit is contained in:
parent 6594e11caa
commit 63b2376873
10 changed files with 490 additions and 53 deletions
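In short, the chart gains two new settings under metricsConfig. A minimal sketch of a values override, assembled from the commented example in values.yaml further down in this commit (the boundary values and the dropped metric are illustrative):

metricsConfig:
  # global histogram bucket boundaries, in seconds
  bucketBoundaries: [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5]
  # per-metric exposure: drop a metric entirely to cut cardinality
  metricsExposure:
    kyverno_admission_review_duration_seconds:
      enabled: false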
@@ -285,7 +285,9 @@ The chart values are organised per component.

| metricsConfig.annotations | object | `{}` | Additional annotations to add to the configmap. |
| metricsConfig.namespaces.include | list | `[]` | List of namespaces to capture metrics for. |
| metricsConfig.namespaces.exclude | list | `[]` | list of namespaces to NOT capture metrics for. |
| metricsConfig.metricsRefreshInterval | string | `nil` | Rate at which metrics should reset so as to clean up the memory footprint of kyverno metrics, if you might be expecting high memory footprint of Kyverno's metrics. Default: 0, no refresh of metrics |
| metricsConfig.metricsRefreshInterval | string | `nil` | Rate at which metrics should reset so as to clean up the memory footprint of kyverno metrics, if you might be expecting high memory footprint of Kyverno's metrics. Default: 0, no refresh of metrics. WARNING: This flag is not working since Kyverno 1.8.0 |
| metricsConfig.bucketBoundaries | list | `[0.005,0.01,0.025,0.05,0.1,0.25,0.5,1,2.5,5,10,15,20,25,30]` | Configures the bucket boundaries for all Histogram metrics, changing this configuration requires restart of the kyverno admission controller |
| metricsConfig.metricsExposure | map | `nil` | Configures the exposure of individual metrics, by default all metrics and all labels are exported, changing this configuration requires restart of the kyverno admission controller |
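As a concrete illustration of the metricsExposure value described above (mirroring the commented sample in values.yaml later in this commit), high-cardinality label dimensions can be dropped per metric and the histogram buckets overridden per metric:

metricsConfig:
  metricsExposure:
    kyverno_policy_execution_duration_seconds:
      disabledLabelDimensions: ["resource_kind", "resource_namespace", "resource_request_operation"]
      bucketBoundaries: [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5]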
### Features
@@ -17,4 +17,10 @@ data:

{{- with .Values.metricsConfig.metricsRefreshInterval }}
  metricsRefreshInterval: {{ . }}
{{- end }}
{{- with .Values.metricsConfig.metricsExposure }}
  metricsExposure: {{ toJson . | quote }}
{{- end }}
{{- with .Values.metricsConfig.bucketBoundaries }}
  bucketBoundaries: {{ join ", " . | quote }}
{{- end }}
{{- end -}}
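Given values like the sketches above, this template renders the new keys into the Kyverno metrics ConfigMap roughly as follows (a sketch; metricsExposure is serialised with toJson and quoted, bucketBoundaries is joined into a comma-separated string):

data:
  metricsExposure: "{\"kyverno_admission_review_duration_seconds\":{\"enabled\":false}}"
  bucketBoundaries: "0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5"

These string values are what the controller parses back with parseMetricExposureConfig and parseBucketBoundariesConfig further down in the diff.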
@@ -241,9 +241,21 @@ metricsConfig:

    # -- list of namespaces to NOT capture metrics for.
    exclude: []

  # -- (string) Rate at which metrics should reset so as to clean up the memory footprint of kyverno metrics, if you might be expecting high memory footprint of Kyverno's metrics. Default: 0, no refresh of metrics
  # -- (string) Rate at which metrics should reset so as to clean up the memory footprint of kyverno metrics, if you might be expecting high memory footprint of Kyverno's metrics. Default: 0, no refresh of metrics. WARNING: This flag is not working since Kyverno 1.8.0
  metricsRefreshInterval: ~
  # metricsRefreshInterval: 24h
  # metricsRefreshInterval: 24h

  # -- (list) Configures the bucket boundaries for all Histogram metrics, changing this configuration requires restart of the kyverno admission controller
  bucketBoundaries: [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 15, 20, 25, 30]

  # -- (map) Configures the exposure of individual metrics, by default all metrics and all labels are exported, changing this configuration requires restart of the kyverno admission controller
  metricsExposure: ~
  # metricsExposure:
  #   kyverno_policy_execution_duration_seconds:
  #     disabledLabelDimensions: ["resource_kind", "resource_namespace", "resource_request_operation"]
  #     bucketBoundaries: [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5]
  #   kyverno_admission_review_duration_seconds:
  #     enabled: false

# -- Image pull secrets for image verification policies, this will define the `--imagePullSecrets` argument
imagePullSecrets: {}
@@ -202,6 +202,7 @@ metadata:

    app.kubernetes.io/version: latest
data:
  namespaces: "{\"exclude\":[],\"include\":[]}"
  bucketBoundaries: "0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 15, 20, 25, 30"
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
@@ -5,6 +5,8 @@ import (

    "sync"
    "time"

    "go.opentelemetry.io/otel/attribute"
    sdkmetric "go.opentelemetry.io/otel/sdk/metric"
    corev1 "k8s.io/api/core/v1"
)
@@ -18,6 +20,10 @@ type MetricsConfiguration interface {

    GetMetricsRefreshInterval() time.Duration
    // CheckNamespace returns `true` if the namespace has to be considered
    CheckNamespace(string) bool
    // GetBucketBoundaries returns the bucket boundaries for Histogram metrics
    GetBucketBoundaries() []float64
    // BuildMeterProviderViews returns OTel views, removing attributes which were disabled in the config
    BuildMeterProviderViews() []sdkmetric.View
    // Load loads configuration from a configmap
    Load(*corev1.ConfigMap)
    // OnChanged adds a callback to be invoked when the configuration is reloaded
@@ -28,19 +34,17 @@ type MetricsConfiguration interface {

type metricsConfig struct {
    namespaces             namespacesConfig
    metricsRefreshInterval time.Duration
    bucketBoundaries       []float64
    metricsExposure        map[string]metricExposureConfig
    mux                    sync.RWMutex
    callbacks              []func()
}

// NewDefaultMetricsConfiguration ...
func NewDefaultMetricsConfiguration() *metricsConfig {
    return &metricsConfig{
        metricsRefreshInterval: 0,
        namespaces: namespacesConfig{
            IncludeNamespaces: []string{},
            ExcludeNamespaces: []string{},
        },
    }
    config := metricsConfig{}
    config.reset()
    return &config
}

func (cd *metricsConfig) OnChanged(callback func()) {
@@ -63,6 +67,43 @@ func (mcd *metricsConfig) GetIncludeNamespaces() []string {

    return mcd.namespaces.IncludeNamespaces
}

// GetBucketBoundaries returns the bucket boundaries for Histogram metrics
func (mcd *metricsConfig) GetBucketBoundaries() []float64 {
    mcd.mux.RLock()
    defer mcd.mux.RUnlock()
    return mcd.bucketBoundaries
}

func (mcd *metricsConfig) BuildMeterProviderViews() []sdkmetric.View {
    mcd.mux.RLock()
    defer mcd.mux.RUnlock()
    var views []sdkmetric.View
    for key, value := range mcd.metricsExposure {
        if *value.Enabled {
            views = append(views, sdkmetric.NewView(
                sdkmetric.Instrument{Name: key},
                sdkmetric.Stream{
                    AttributeFilter: func(kv attribute.KeyValue) bool {
                        return !slices.Contains(value.DisabledLabelDimensions, string(kv.Key))
                    },
                    Aggregation: sdkmetric.AggregationExplicitBucketHistogram{
                        Boundaries: value.BucketBoundaries,
                        NoMinMax:   false,
                    },
                },
            ))
        } else if !*value.Enabled {
            views = append(views, sdkmetric.NewView(
                sdkmetric.Instrument{Name: key},
                sdkmetric.Stream{
                    Aggregation: sdkmetric.AggregationDrop{},
                },
            ))
        }
    }
    return views
}

// GetMetricsRefreshInterval returns the refresh interval for the metrics
func (mcd *metricsConfig) GetMetricsRefreshInterval() time.Duration {
    mcd.mux.RLock()
@@ -105,11 +146,7 @@ func (cd *metricsConfig) load(cm *corev1.ConfigMap) {

        data = map[string]string{}
    }
    // reset
    cd.metricsRefreshInterval = 0
    cd.namespaces = namespacesConfig{
        IncludeNamespaces: []string{},
        ExcludeNamespaces: []string{},
    }
    cd.reset()
    // load metricsRefreshInterval
    metricsRefreshInterval, ok := data["metricsRefreshInterval"]
    if !ok {
@@ -138,17 +175,67 @@ func (cd *metricsConfig) load(cm *corev1.ConfigMap) {

            logger.Info("namespaces configured")
        }
    }
    // load bucket boundaries
    bucketBoundariesString, ok := data["bucketBoundaries"]
    if !ok {
        logger.Info("bucketBoundaries not set")
    } else {
        logger := logger.WithValues("bucketBoundaries", bucketBoundariesString)
        bucketBoundaries, err := parseBucketBoundariesConfig(bucketBoundariesString)
        if err != nil {
            logger.Error(err, "failed to parse bucketBoundariesString")
        } else {
            cd.bucketBoundaries = bucketBoundaries
            logger.Info("bucketBoundaries configured")
        }
    }
    // load metricsExposure
    metricsExposureString, ok := data["metricsExposure"]
    if !ok {
        logger.Info("metricsExposure not set")
    } else {
        logger := logger.WithValues("metricsExposure", metricsExposureString)
        metricsExposure, err := parseMetricExposureConfig(metricsExposureString, cd.bucketBoundaries)
        if err != nil {
            logger.Error(err, "failed to parse metricsExposure")
        } else {
            cd.metricsExposure = metricsExposure
            logger.Info("metricsExposure configured")
        }
    }
}

func (mcd *metricsConfig) unload() {
    mcd.mux.Lock()
    defer mcd.mux.Unlock()
    defer mcd.notify()
    mcd.reset()
}

func (mcd *metricsConfig) reset() {
    mcd.metricsRefreshInterval = 0
    mcd.namespaces = namespacesConfig{
        IncludeNamespaces: []string{},
        ExcludeNamespaces: []string{},
    }
    mcd.bucketBoundaries = []float64{
        0.005,
        0.01,
        0.025,
        0.05,
        0.1,
        0.25,
        0.5,
        1,
        2.5,
        5,
        10,
        15,
        20,
        25,
        30,
    }
    mcd.metricsExposure = map[string]metricExposureConfig{}
}

func (mcd *metricsConfig) notify() {
pkg/config/metricsconfig_test.go (new file, 184 lines)

@@ -0,0 +1,184 @@
package config

import (
    "reflect"
    "testing"
    "time"

    "go.opentelemetry.io/otel/attribute"
    sdkmetric "go.opentelemetry.io/otel/sdk/metric"
    corev1 "k8s.io/api/core/v1"
)

func boolPtr(b bool) *bool {
    return &b
}

func Test_metricsConfig_load(t *testing.T) {
    tests := []struct {
        name          string
        configMap     *corev1.ConfigMap
        expectedValue *metricsConfig
    }{
        {
            name: "Case 1: Test defaults",
            configMap: &corev1.ConfigMap{
                Data: map[string]string{},
            },
            expectedValue: &metricsConfig{
                metricsRefreshInterval: 0,
                namespaces:             namespacesConfig{IncludeNamespaces: []string{}, ExcludeNamespaces: []string{}},
                bucketBoundaries:       []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 15, 20, 25, 30},
                metricsExposure:        map[string]metricExposureConfig{},
            },
        },
        {
            name: "Case 2: All fields provided",
            configMap: &corev1.ConfigMap{
                Data: map[string]string{
                    "metricsRefreshInterval": "10s",
                    "namespaces":             `{"include": ["namespace1"], "exclude": ["namespace2"]}`,
                    "bucketBoundaries":       "0.005, 0.01, 0.025, 0.05",
                    "metricsExposure":        `{"metric1": {"enabled": true, "disabledLabelDimensions": ["dim1"]}, "metric2": {"enabled": true, "disabledLabelDimensions": ["dim1","dim2"], "bucketBoundaries": [0.025, 0.05]}}`,
                },
            },
            expectedValue: &metricsConfig{
                metricsRefreshInterval: 10 * time.Second,
                namespaces:             namespacesConfig{IncludeNamespaces: []string{"namespace1"}, ExcludeNamespaces: []string{"namespace2"}},
                bucketBoundaries:       []float64{0.005, 0.01, 0.025, 0.05},
                metricsExposure: map[string]metricExposureConfig{
                    "metric1": {Enabled: boolPtr(true), DisabledLabelDimensions: []string{"dim1"}, BucketBoundaries: []float64{0.005, 0.01, 0.025, 0.05}},
                    "metric2": {Enabled: boolPtr(true), DisabledLabelDimensions: []string{"dim1", "dim2"}, BucketBoundaries: []float64{0.025, 0.05}},
                },
            },
        },
        {
            name: "Case 3: Some of the fields provided",
            configMap: &corev1.ConfigMap{
                Data: map[string]string{
                    "namespaces":      `{"include": ["namespace1"], "exclude": ["namespace2"]}`,
                    "metricsExposure": `{"metric1": {"enabled": true, "disabledLabelDimensions": ["dim1"]}, "metric2": {"enabled": true, "disabledLabelDimensions": ["dim1","dim2"], "bucketBoundaries": [0.025, 0.05]}}`,
                },
            },
            expectedValue: &metricsConfig{
                metricsRefreshInterval: 0,
                namespaces:             namespacesConfig{IncludeNamespaces: []string{"namespace1"}, ExcludeNamespaces: []string{"namespace2"}},
                bucketBoundaries:       []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 15, 20, 25, 30},
                metricsExposure: map[string]metricExposureConfig{
                    "metric1": {Enabled: boolPtr(true), DisabledLabelDimensions: []string{"dim1"}, BucketBoundaries: []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 15, 20, 25, 30}},
                    "metric2": {Enabled: boolPtr(true), DisabledLabelDimensions: []string{"dim1", "dim2"}, BucketBoundaries: []float64{0.025, 0.05}},
                },
            },
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            cd := NewDefaultMetricsConfiguration()
            cd.load(tt.configMap)

            if !reflect.DeepEqual(cd.metricsRefreshInterval, tt.expectedValue.metricsRefreshInterval) {
                t.Errorf("Expected %+v, but got %+v", tt.expectedValue.metricsRefreshInterval, cd.metricsRefreshInterval)
            }
            if !reflect.DeepEqual(cd.namespaces, tt.expectedValue.namespaces) {
                t.Errorf("Expected %+v, but got %+v", tt.expectedValue.namespaces, cd.namespaces)
            }
            if !reflect.DeepEqual(cd.bucketBoundaries, tt.expectedValue.bucketBoundaries) {
                t.Errorf("Expected %+v, but got %+v", tt.expectedValue.bucketBoundaries, cd.bucketBoundaries)
            }
            if !reflect.DeepEqual(cd.metricsExposure, tt.expectedValue.metricsExposure) {
                t.Errorf("Expected %+v, but got %+v", tt.expectedValue.metricsExposure, cd.metricsExposure)
            }
        })
    }
}

func Test_metricsConfig_BuildMeterProviderViews(t *testing.T) {
    tests := []struct {
        name            string
        metricsExposure map[string]metricExposureConfig
        expectedSize    int
        validateFunc    func([]sdkmetric.View) bool
    }{
        {
            name:            "Case 1: defaults",
            metricsExposure: map[string]metricExposureConfig{},
            expectedSize:    0,
        },
        {
            name: "Case 2: metrics enabled",
            metricsExposure: map[string]metricExposureConfig{
                "metric1": {Enabled: boolPtr(true), DisabledLabelDimensions: []string{"dim1"}, BucketBoundaries: []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 15, 20, 25, 30}},
            },
            expectedSize: 1,
            validateFunc: func(views []sdkmetric.View) bool {
                stream, _ := views[0](sdkmetric.Instrument{Name: "metric1"})
                assert := stream.AttributeFilter(attribute.String("policy_validation_mode", ""))
                assert = assert && !stream.AttributeFilter(attribute.String("dim1", ""))
                assert = assert && reflect.DeepEqual(stream.Aggregation, sdkmetric.AggregationExplicitBucketHistogram{
                    Boundaries: []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 15, 20, 25, 30},
                    NoMinMax:   false,
                })
                return assert
            },
        },
        {
            name: "Case 3: metrics disabled",
            metricsExposure: map[string]metricExposureConfig{
                "metric1": {Enabled: boolPtr(false)},
            },
            expectedSize: 1,
            validateFunc: func(views []sdkmetric.View) bool {
                stream, _ := views[0](sdkmetric.Instrument{Name: "metric1"})
                return reflect.DeepEqual(stream.Aggregation, sdkmetric.AggregationDrop{})
            },
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            mcd := NewDefaultMetricsConfiguration()
            mcd.metricsExposure = tt.metricsExposure
            got := mcd.BuildMeterProviderViews()
            if len(got) != tt.expectedSize {
                t.Errorf("Expected result size to be %v, but got %v", tt.expectedSize, len(got))
            }
            if tt.validateFunc != nil {
                if !tt.validateFunc(got) {
                    t.Errorf("The validation function did not return true!")
                }
            }
        })
    }
}

func Test_metricsConfig_GetBucketBoundaries(t *testing.T) {
    tests := []struct {
        name     string
        provided []float64
        want     []float64
    }{
        {
            name:     "Case 1: Test defaults",
            provided: []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 15, 20, 25, 30},
            want:     []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 15, 20, 25, 30},
        },
        {
            name:     "Case 2: Custom",
            provided: []float64{0.005, 0.01, 0.025, 0.05},
            want:     []float64{0.005, 0.01, 0.025, 0.05},
        },
        {
            name:     "Case 3: Empty",
            provided: []float64{},
            want:     []float64{},
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            mcd := NewDefaultMetricsConfiguration()
            mcd.bucketBoundaries = tt.provided
            if got := mcd.GetBucketBoundaries(); !reflect.DeepEqual(got, tt.want) {
                t.Errorf("GetBucketBoundaries() = %v, want %v", got, tt.want)
            }
        })
    }
}
@@ -2,7 +2,9 @@ package config

import (
    "encoding/json"
    "fmt"
    "regexp"
    "strconv"
    "strings"

    kubeutils "github.com/kyverno/kyverno/pkg/utils/kube"
@@ -72,6 +74,36 @@ func parseIncludeExcludeNamespacesFromNamespacesConfig(in string) (namespacesCon

    return namespacesConfigObject, err
}

type metricExposureConfig struct {
    Enabled                 *bool     `json:"enabled,omitempty"`
    DisabledLabelDimensions []string  `json:"disabledLabelDimensions,omitempty"`
    BucketBoundaries        []float64 `json:"bucketBoundaries,omitempty"`
}

func parseMetricExposureConfig(in string, defaultBoundaries []float64) (map[string]metricExposureConfig, error) {
    var metricExposureMap map[string]metricExposureConfig
    err := json.Unmarshal([]byte(in), &metricExposureMap)
    if err != nil {
        return nil, err
    }

    for key, config := range metricExposureMap {
        if config.Enabled == nil {
            b := true
            config.Enabled = &b
        }
        if config.DisabledLabelDimensions == nil {
            config.DisabledLabelDimensions = []string{}
        }
        if config.BucketBoundaries == nil {
            config.BucketBoundaries = defaultBoundaries
        }
        metricExposureMap[key] = config
    }

    return metricExposureMap, err
}

type filter struct {
    Group   string
    Version string
@@ -124,3 +156,22 @@ func parseKinds(in string) []filter {

    }
    return resources
}

func parseBucketBoundariesConfig(boundariesString string) ([]float64, error) {
    var boundaries []float64
    boundariesString = strings.TrimSpace(boundariesString)

    if boundariesString != "" {
        boundaryStrings := strings.Split(boundariesString, ",")
        for _, boundaryStr := range boundaryStrings {
            boundaryStr = strings.TrimSpace(boundaryStr)
            boundary, err := strconv.ParseFloat(boundaryStr, 64)
            if err != nil {
                return nil, fmt.Errorf("invalid boundary value '%s'", boundaryStr)
            }
            boundaries = append(boundaries, boundary)
        }
    }

    return boundaries, nil
}
@@ -1,6 +1,7 @@

package config

import (
    "errors"
    "reflect"
    "testing"
)
@@ -256,3 +257,116 @@ func Test_parseWebhookAnnotations(t *testing.T) {

        })
    }
}

func Test_parseBucketBoundariesConfig(t *testing.T) {
    var emptyBoundaries []float64

    tests := []struct {
        input         string
        expected      []float64
        expectedError error
    }{
        {"0.005, 0.01, 0.025, 0.05", []float64{0.005, 0.01, 0.025, 0.05}, nil},
        {"0.1, 0.2, 0.3", []float64{0.1, 0.2, 0.3}, nil},
        {"0.1,0.2,0.3", []float64{0.1, 0.2, 0.3}, nil},
        {"", emptyBoundaries, nil},
        {" ", emptyBoundaries, nil},
        {"invalid, 0.01, 0.025, 0.05", nil, errors.New("invalid boundary value 'invalid'")},
        {"0.005, 0.01, , 0.05", nil, errors.New("invalid boundary value ''")},
    }

    for _, test := range tests {
        t.Run(test.input, func(t *testing.T) {
            boundaries, err := parseBucketBoundariesConfig(test.input)

            if !reflect.DeepEqual(boundaries, test.expected) {
                t.Errorf("Expected boundaries %v, but got %v", test.expected, boundaries)
            }

            if (err == nil && test.expectedError != nil) || (err != nil && err.Error() != test.expectedError.Error()) {
                t.Errorf("Expected error '%v', but got '%v'", test.expectedError, err)
            }
        })
    }
}

func Test_parseMetricExposureConfig(t *testing.T) {
    boolPtr := func(b bool) *bool {
        return &b
    }
    defaultBoundaries := []float64{0.005, 0.01}
    tests := []struct {
        input         string
        expected      map[string]metricExposureConfig
        expectedError bool
    }{
        // Test case 1: Valid JSON with "enabled", "disabledLabelDimensions" and "bucketBoundaries" set
        {
            input: `{
                "key1": {"enabled": true, "disabledLabelDimensions": ["dim1", "dim2"], "bucketBoundaries": []},
                "key2": {"enabled": false, "disabledLabelDimensions": [], "bucketBoundaries": [1.01, 2.5, 5, 10]}
            }`,
            expected: map[string]metricExposureConfig{
                "key1": {Enabled: boolPtr(true), DisabledLabelDimensions: []string{"dim1", "dim2"}, BucketBoundaries: []float64{}},
                "key2": {Enabled: boolPtr(false), DisabledLabelDimensions: []string{}, BucketBoundaries: []float64{1.01, 2.5, 5, 10}},
            },
            expectedError: false,
        },
        // Test case 2: Valid JSON with only "disabledLabelDimensions" set
        {
            input: `{
                "key1": {"disabledLabelDimensions": ["dim1", "dim2"]}
            }`,
            expected: map[string]metricExposureConfig{
                "key1": {Enabled: boolPtr(true), DisabledLabelDimensions: []string{"dim1", "dim2"}, BucketBoundaries: defaultBoundaries},
            },
            expectedError: false,
        },
        // Test case 3: Valid JSON with "enabled" set to false
        {
            input: `{
                "key1": {"enabled": false}
            }`,
            expected: map[string]metricExposureConfig{
                "key1": {Enabled: boolPtr(false), DisabledLabelDimensions: []string{}, BucketBoundaries: defaultBoundaries},
            },
            expectedError: false,
        },
        // Test case 4: Valid JSON with only "bucketBoundaries" set
        {
            input: `{
                "key1": {"bucketBoundaries": []},
                "key2": {"bucketBoundaries": [1.01, 2.5, 5, 10]}
            }`,
            expected: map[string]metricExposureConfig{
                "key1": {Enabled: boolPtr(true), DisabledLabelDimensions: []string{}, BucketBoundaries: []float64{}},
                "key2": {Enabled: boolPtr(true), DisabledLabelDimensions: []string{}, BucketBoundaries: []float64{1.01, 2.5, 5, 10}},
            },
            expectedError: false,
        },
        // Test case 5: Invalid JSON
        {
            input:         "invalid-json",
            expected:      nil,
            expectedError: true,
        },
    }

    for _, test := range tests {
        t.Run(test.input, func(t *testing.T) {
            configMap, err := parseMetricExposureConfig(test.input, defaultBoundaries)

            if test.expectedError && err == nil {
                t.Error("Expected an error, but got nil")
            }

            if !test.expectedError && err != nil {
                t.Errorf("Expected no error, but got: %v", err)
            }

            if !reflect.DeepEqual(configMap, test.expected) {
                t.Errorf("Expected %+v, but got %+v", test.expected, configMap)
            }
        })
    }
}
@@ -35,12 +35,13 @@ func InitMetrics(

            transportCreds,
            kubeClient,
            logger,
            metricsConfiguration,
        )
        if err != nil {
            return nil, nil, nil, err
        }
    } else if otelProvider == "prometheus" {
        meterProvider, metricsServerMux, err = NewPrometheusConfig(ctx, logger)
        meterProvider, metricsServerMux, err = NewPrometheusConfig(ctx, logger, metricsConfiguration)
        if err != nil {
            return nil, nil, nil, err
        }
@@ -71,42 +71,22 @@ func ShutDownController(ctx context.Context, pusher *sdkmetric.MeterProvider) {

    }
}

func aggregationSelector(ik sdkmetric.InstrumentKind) sdkmetric.Aggregation {
    switch ik {
    case sdkmetric.InstrumentKindHistogram:
        return sdkmetric.AggregationExplicitBucketHistogram{
            Boundaries: []float64{
                0.005,
                0.01,
                0.025,
                0.05,
                0.1,
                0.25,
                0.5,
                1,
                2.5,
                5,
                10,
                15,
                20,
                25,
                30,
            },
            NoMinMax: false,
func aggregationSelector(metricsConfiguration kconfig.MetricsConfiguration) func(ik sdkmetric.InstrumentKind) sdkmetric.Aggregation {
    return func(ik sdkmetric.InstrumentKind) sdkmetric.Aggregation {
        switch ik {
        case sdkmetric.InstrumentKindHistogram:
            return sdkmetric.AggregationExplicitBucketHistogram{
                Boundaries: metricsConfiguration.GetBucketBoundaries(),
                NoMinMax:   false,
            }
        default:
            return sdkmetric.DefaultAggregationSelector(ik)
        }
    default:
        return sdkmetric.DefaultAggregationSelector(ik)
    }
}

func NewOTLPGRPCConfig(
    ctx context.Context,
    endpoint string,
    certs string,
    kubeClient kubernetes.Interface,
    log logr.Logger,
) (metric.MeterProvider, error) {
    options := []otlpmetricgrpc.Option{otlpmetricgrpc.WithEndpoint(endpoint), otlpmetricgrpc.WithAggregationSelector(aggregationSelector)}
func NewOTLPGRPCConfig(ctx context.Context, endpoint string, certs string, kubeClient kubernetes.Interface, log logr.Logger, configuration kconfig.MetricsConfiguration) (metric.MeterProvider, error) {
    options := []otlpmetricgrpc.Option{otlpmetricgrpc.WithEndpoint(endpoint), otlpmetricgrpc.WithAggregationSelector(aggregationSelector(configuration))}
    if certs != "" {
        // here the certificates are stored as configmaps
        transportCreds, err := tlsutils.FetchCert(ctx, certs, kubeClient)
@@ -144,14 +124,12 @@ func NewOTLPGRPCConfig(

    provider := sdkmetric.NewMeterProvider(
        sdkmetric.WithReader(reader),
        sdkmetric.WithResource(res),
        sdkmetric.WithView(configuration.BuildMeterProviderViews()...),
    )
    return provider, nil
}

func NewPrometheusConfig(
    ctx context.Context,
    log logr.Logger,
) (metric.MeterProvider, *http.ServeMux, error) {
func NewPrometheusConfig(ctx context.Context, log logr.Logger, configuration kconfig.MetricsConfiguration) (metric.MeterProvider, *http.ServeMux, error) {
    res, err := resource.Merge(
        resource.Default(),
        resource.NewWithAttributes(
@@ -168,7 +146,7 @@ func NewPrometheusConfig(

    exporter, err := prometheus.New(
        prometheus.WithoutUnits(),
        prometheus.WithoutTargetInfo(),
        prometheus.WithAggregationSelector(aggregationSelector),
        prometheus.WithAggregationSelector(aggregationSelector(configuration)),
    )
    if err != nil {
        log.Error(err, "failed to initialize prometheus exporter")
@@ -177,6 +155,7 @@ func NewPrometheusConfig(

    provider := sdkmetric.NewMeterProvider(
        sdkmetric.WithReader(exporter),
        sdkmetric.WithResource(res),
        sdkmetric.WithView(configuration.BuildMeterProviderViews()...),
    )
    metricsServerMux := http.NewServeMux()
    metricsServerMux.Handle(config.MetricsPath, promhttp.Handler())