
feat: add cleanup controller to helm chart (#5329)

* feat: add cleanup controller to helm chart

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* add webhook config

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* rbac

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* fixes

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* secret

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* fix

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* certs

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* fix labels

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* add server

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* rbac

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* handler

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* fix linter

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>
Charles-Edouard Brétéché 2022-11-14 18:30:12 +01:00 committed by GitHub
parent 511eb797e6
commit 86fc537ce0
15 changed files with 526 additions and 5 deletions

@@ -236,6 +236,7 @@ ko-build-all: ko-build-kyvernopre ko-build-kyverno ko-build-cli ko-build-cleanup
REGISTRY_USERNAME ?= dummy
KO_KYVERNOPRE_IMAGE := ko.local/github.com/kyverno/kyverno/cmd/initcontainer
KO_KYVERNO_IMAGE := ko.local/github.com/kyverno/kyverno/cmd/kyverno
KO_CLEANUP_IMAGE := ko.local/github.com/kyverno/kyverno/cmd/cleanup-controller
.PHONY: ko-login
ko-login: $(KO)
@@ -253,6 +254,10 @@ ko-publish-kyverno: ko-login ## Build and publish kyverno image (with ko)
ko-publish-cli: ko-login ## Build and publish cli image (with ko)
@LD_FLAGS=$(LD_FLAGS) KOCACHE=$(KOCACHE) KO_DOCKER_REPO=$(REPO_CLI) $(KO) build $(CLI_DIR) --bare --tags=$(KO_TAGS) --platform=$(PLATFORMS)
.PHONY: ko-publish-cleanup-controller
ko-publish-cleanup-controller: ko-login ## Build and publish cleanup controller image (with ko)
@LD_FLAGS=$(LD_FLAGS) KOCACHE=$(KOCACHE) KO_DOCKER_REPO=$(REPO_CLEANUP) $(KO) build $(CLEANUP_DIR) --bare --tags=$(KO_TAGS) --platform=$(PLATFORMS)
.PHONY: ko-publish-kyvernopre-dev
ko-publish-kyvernopre-dev: ko-login ## Build and publish kyvernopre dev image (with ko)
@LD_FLAGS=$(LD_FLAGS_DEV) KOCACHE=$(KOCACHE) KO_DOCKER_REPO=$(REPO_KYVERNOPRE) $(KO) build $(KYVERNOPRE_DIR) --bare --tags=$(KO_TAGS_DEV) --platform=$(PLATFORMS)
@@ -265,11 +270,15 @@ ko-publish-kyverno-dev: ko-login ## Build and publish kyverno dev image (with ko
ko-publish-cli-dev: ko-login ## Build and publish cli dev image (with ko)
@LD_FLAGS=$(LD_FLAGS_DEV) KOCACHE=$(KOCACHE) KO_DOCKER_REPO=$(REPO_CLI) $(KO) build $(CLI_DIR) --bare --tags=$(KO_TAGS_DEV) --platform=$(PLATFORMS)
.PHONY: ko-publish-cleanup-controller-dev
ko-publish-cleanup-controller-dev: ko-login ## Build and publish cleanup controller dev image (with ko)
@LD_FLAGS=$(LD_FLAGS_DEV) KOCACHE=$(KOCACHE) KO_DOCKER_REPO=$(REPO_CLEANUP) $(KO) build $(CLEANUP_DIR) --bare --tags=$(KO_TAGS_DEV) --platform=$(PLATFORMS)
.PHONY: ko-publish-all
ko-publish-all: ko-publish-kyvernopre ko-publish-kyverno ko-publish-cli ## Build and publish all images (with ko)
ko-publish-all: ko-publish-kyvernopre ko-publish-kyverno ko-publish-cli ko-publish-cleanup-controller ## Build and publish all images (with ko)
.PHONY: ko-publish-all-dev
ko-publish-all-dev: ko-publish-kyvernopre-dev ko-publish-kyverno-dev ko-publish-cli-dev ## Build and publish all dev images (with ko)
ko-publish-all-dev: ko-publish-kyvernopre-dev ko-publish-kyverno-dev ko-publish-cli-dev ko-publish-cleanup-controller-dev ## Build and publish all dev images (with ko)
#################
# BUILD (IMAGE) #
@@ -277,6 +286,7 @@ ko-publish-all-dev: ko-publish-kyvernopre-dev ko-publish-kyverno-dev ko-publish-
LOCAL_KYVERNOPRE_IMAGE := $($(shell echo $(BUILD_WITH) | tr '[:lower:]' '[:upper:]')_KYVERNOPRE_IMAGE)
LOCAL_KYVERNO_IMAGE := $($(shell echo $(BUILD_WITH) | tr '[:lower:]' '[:upper:]')_KYVERNO_IMAGE)
LOCAL_CLEANUP_IMAGE := $($(shell echo $(BUILD_WITH) | tr '[:lower:]' '[:upper:]')_CLEANUP_IMAGE)
.PHONY: image-build-kyvernopre
image-build-kyvernopre: $(BUILD_WITH)-build-kyvernopre
@@ -287,6 +297,9 @@ image-build-kyverno: $(BUILD_WITH)-build-kyverno
.PHONY: image-build-cli
image-build-cli: $(BUILD_WITH)-build-cli
.PHONY: image-build-cleanup-controller
image-build-cleanup-controller: $(BUILD_WITH)-build-cleanup-controller
.PHONY: image-build-all
image-build-all: $(BUILD_WITH)-build-all
@@ -651,13 +664,20 @@ kind-load-kyverno: $(KIND) image-build-kyverno ## Build kyverno image and load i
@echo Load kyverno image... >&2
@$(KIND) load docker-image --name $(KIND_NAME) $(LOCAL_KYVERNO_IMAGE):$(IMAGE_TAG_DEV)
.PHONY: kind-load-cleanup-controller
kind-load-cleanup-controller: $(KIND) image-build-cleanup-controller ## Build cleanup controller image and load it in kind cluster
@echo Load cleanup controller image... >&2
@$(KIND) load docker-image --name $(KIND_NAME) $(LOCAL_CLEANUP_IMAGE):$(IMAGE_TAG_DEV)
.PHONY: kind-load-all
kind-load-all: kind-load-kyvernopre kind-load-kyverno ## Build images and load them in kind cluster
kind-load-all: kind-load-kyvernopre kind-load-kyverno kind-load-cleanup-controller ## Build images and load them in kind cluster
.PHONY: kind-deploy-kyverno
kind-deploy-kyverno: $(HELM) kind-load-all ## Build images, load them in kind cluster and deploy kyverno helm chart
@echo Install kyverno chart... >&2
@$(HELM) upgrade --install kyverno --namespace kyverno --wait --create-namespace ./charts/kyverno \
--set cleanupController.image.repository=$(LOCAL_CLEANUP_IMAGE) \
--set cleanupController.image.tag=$(IMAGE_TAG_DEV) \
--set image.repository=$(LOCAL_KYVERNO_IMAGE) \
--set image.tag=$(IMAGE_TAG_DEV) \
--set initImage.repository=$(LOCAL_KYVERNOPRE_IMAGE) \
@@ -665,7 +685,7 @@ kind-deploy-kyverno: $(HELM) kind-load-all ## Build images, load them in kind cl
--set initContainer.extraArgs={--loggingFormat=text} \
--set "extraArgs={--loggingFormat=text}"
@echo Restart kyverno pods... >&2
@kubectl rollout restart deployment -n kyverno kyverno
@kubectl rollout restart deployment -n kyverno
.PHONY: kind-deploy-kyverno-policies
kind-deploy-kyverno-policies: $(HELM) ## Deploy kyverno-policies helm chart
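For reference, the `--set` flags passed by the `kind-deploy-kyverno` target above correspond roughly to the following values fragment. This is a sketch only: the repositories reflect the `KO_*_IMAGE` variables when `BUILD_WITH=ko`, and the tag shown is a placeholder for `$(IMAGE_TAG_DEV)`.

```yaml
# Illustrative equivalent of the kind-deploy-kyverno --set flags (not part of the diff).
cleanupController:
  image:
    repository: ko.local/github.com/kyverno/kyverno/cmd/cleanup-controller
    tag: dev-latest   # placeholder for $(IMAGE_TAG_DEV)
image:
  repository: ko.local/github.com/kyverno/kyverno/cmd/kyverno
  tag: dev-latest     # placeholder for $(IMAGE_TAG_DEV)
initImage:
  repository: ko.local/github.com/kyverno/kyverno/cmd/initcontainer
initContainer:
  extraArgs:
    - --loggingFormat=text
extraArgs:
  - --loggingFormat=text
```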

@@ -214,6 +214,16 @@ The command removes all the Kubernetes components associated with the chart and
| grafana.enabled | bool | `false` | Enable grafana dashboard creation. |
| grafana.namespace | string | `nil` | Namespace to create the grafana dashboard configmap. If not set, it will be created in the same namespace where the chart is deployed. |
| grafana.annotations | object | `{}` | Grafana dashboard configmap annotations. |
| cleanupController.enabled | bool | `false` | Enable cleanup controller. |
| cleanupController.image.registry | string | `nil` | Image registry |
| cleanupController.image.repository | string | `"ghcr.io/kyverno/cleanup-controller"` | Image repository |
| cleanupController.image.tag | string | `nil` | Image tag. Defaults to appVersion in Chart.yaml if omitted. |
| cleanupController.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy |
| cleanupController.image.pullSecrets | list | `[]` | Image pull secrets |
| cleanupController.service.port | int | `443` | Service port. |
| cleanupController.service.type | string | `"ClusterIP"` | Service type. |
| cleanupController.service.nodePort | string | `nil` | Service node port. Only used if `service.type` is `NodePort`. |
| cleanupController.service.annotations | object | `{}` | Service annotations. |
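
The cleanup controller is disabled by default. A minimal values override that enables it (a sketch; all keys come from the table above) could look like:

```yaml
# enable the optional cleanup controller
cleanupController:
  enabled: true
  image:
    repository: ghcr.io/kyverno/cleanup-controller
    tag: ""            # empty tag falls back to the chart appVersion
  service:
    type: ClusterIP
    port: 443
```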
## TLS Configuration

@@ -0,0 +1,29 @@
{{/* vim: set filetype=mustache: */}}
{{- define "kyverno.cleanup-controller.deploymentName" -}}
cleanup-controller
{{- end -}}
{{- define "kyverno.cleanup-controller.labels" -}}
app.kubernetes.io/component: cleanup-controller
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/name: {{ template "kyverno.name" . }}
app.kubernetes.io/part-of: {{ template "kyverno.name" . }}
app.kubernetes.io/version: "{{ .Chart.Version }}"
{{- end -}}
{{- define "kyverno.cleanup-controller.matchLabels" -}}
app.kubernetes.io/component: cleanup-controller
app.kubernetes.io/name: {{ template "kyverno.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{- define "kyverno.cleanup-controller.image" -}}
{{- if .image.registry -}}
{{ .image.registry }}/{{ required "An image repository is required" .image.repository }}:{{ default .defaultTag .image.tag }}
{{- else -}}
{{ required "An image repository is required" .image.repository }}:{{ default .defaultTag .image.tag }}
{{- end -}}
{{- end }}
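
For illustration, with `image.registry` left unset and an assumed chart appVersion of `v1.8.0` (a hypothetical tag, not part of this commit), the `kyverno.cleanup-controller.image` helper above would render the container image as:

```yaml
# rendered container image when image.registry is unset (the tag value is hypothetical)
image: "ghcr.io/kyverno/cleanup-controller:v1.8.0"
```

When `image.registry` is set, the registry is prefixed in front of the repository before the tag is appended.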

@@ -0,0 +1,25 @@
{{- if .Values.cleanupController.enabled -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ template "kyverno.cleanup-controller.deploymentName" . }}
  labels:
    {{- include "kyverno.cleanup-controller.labels" . | nindent 4 }}
rules:
  - apiGroups:
      - kyverno.io
    resources:
      - clustercleanuppolicies
      - cleanuppolicies
      - clustercleanuppolicies/*
      - cleanuppolicies/*
    verbs:
      - create
      - delete
      - get
      - list
      - patch
      - update
      - watch
      - deletecollection
{{- end }}

@@ -0,0 +1,16 @@
{{- if .Values.cleanupController.enabled -}}
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ template "kyverno.cleanup-controller.deploymentName" . }}
  labels:
    {{- include "kyverno.cleanup-controller.labels" . | nindent 4 }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ template "kyverno.cleanup-controller.deploymentName" . }}
subjects:
  - kind: ServiceAccount
    name: {{ template "kyverno.cleanup-controller.deploymentName" . }}
    namespace: {{ template "kyverno.namespace" . }}
{{- end -}}

@@ -0,0 +1,31 @@
{{- if .Values.cleanupController.enabled -}}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ template "kyverno.cleanup-controller.deploymentName" . }}
  labels:
    {{- include "kyverno.cleanup-controller.labels" . | nindent 4 }}
  namespace: {{ template "kyverno.namespace" . }}
spec:
  replicas: 1
  selector:
    matchLabels:
      {{- include "kyverno.cleanup-controller.matchLabels" . | nindent 6 }}
  template:
    metadata:
      labels:
        {{- include "kyverno.cleanup-controller.labels" . | nindent 8 }}
    spec:
      serviceAccountName: {{ template "kyverno.cleanup-controller.deploymentName" . }}
      containers:
        - name: controller
          image: {{ include "kyverno.cleanup-controller.image" (dict "image" .Values.cleanupController.image "defaultTag" .Chart.AppVersion) | quote }}
          ports:
            - containerPort: 9443
              name: https
              protocol: TCP
            - containerPort: 8000
              name: metrics
              protocol: TCP
          env: []
{{- end -}}

@@ -0,0 +1,18 @@
{{- if .Values.cleanupController.enabled -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: {{ template "kyverno.cleanup-controller.deploymentName" . }}
  labels:
    {{- include "kyverno.cleanup-controller.labels" . | nindent 4 }}
  namespace: {{ template "kyverno.namespace" . }}
rules:
  - apiGroups:
      - ""
    resources:
      - secrets
    verbs:
      - get
      - list
      - watch
{{- end -}}

@@ -0,0 +1,17 @@
{{- if .Values.cleanupController.enabled -}}
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ template "kyverno.cleanup-controller.deploymentName" . }}
  labels:
    {{- include "kyverno.cleanup-controller.labels" . | nindent 4 }}
  namespace: {{ template "kyverno.namespace" . }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: {{ template "kyverno.cleanup-controller.deploymentName" . }}
subjects:
  - kind: ServiceAccount
    name: {{ template "kyverno.cleanup-controller.deploymentName" . }}
    namespace: {{ template "kyverno.namespace" . }}
{{- end -}}

@@ -0,0 +1,62 @@
{{- if .Values.cleanupController.enabled -}}
{{- $ca := genCA (printf "*.%s.svc" (include "kyverno.namespace" .)) 1024 -}}
{{- $svcName := (printf "%s.%s.svc" (include "kyverno.cleanup-controller.deploymentName" .) (include "kyverno.namespace" .)) -}}
{{- $cert := genSignedCert $svcName nil (list $svcName) 1024 $ca -}}
apiVersion: v1
kind: Secret
metadata:
  name: {{ template "kyverno.cleanup-controller.deploymentName" . }}-ca
  labels:
    {{- include "kyverno.cleanup-controller.labels" . | nindent 4 }}
  namespace: {{ template "kyverno.namespace" . }}
type: kubernetes.io/tls
data:
  tls.key: {{ $ca.Key | b64enc }}
  tls.crt: {{ $ca.Cert | b64enc }}
---
apiVersion: v1
kind: Secret
metadata:
  name: {{ template "kyverno.cleanup-controller.deploymentName" . }}-tls
  labels:
    {{- include "kyverno.cleanup-controller.labels" . | nindent 4 }}
  namespace: {{ template "kyverno.namespace" . }}
type: kubernetes.io/tls
data:
  tls.key: {{ $cert.Key | b64enc }}
  tls.crt: {{ $cert.Cert | b64enc }}
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
  name: {{ template "kyverno.cleanup-controller.deploymentName" . }}
  labels:
    {{- include "kyverno.cleanup-controller.labels" . | nindent 4 }}
webhooks:
  - admissionReviewVersions:
      - v1beta1
    clientConfig:
      caBundle: {{ $ca.Cert | b64enc }}
      service:
        name: {{ template "kyverno.cleanup-controller.deploymentName" . }}
        namespace: {{ template "kyverno.namespace" . }}
        path: /todo
        port: 443
    failurePolicy: Fail
    matchPolicy: Equivalent
    name: {{ printf "%s.%s.svc" (include "kyverno.cleanup-controller.deploymentName" .) (include "kyverno.namespace" .) }}
    rules:
      - apiGroups:
          - kyverno.io
        apiVersions:
          - v1alpha1
        operations:
          - CREATE
          - UPDATE
        resources:
          - clustercleanuppolicies/*
          - cleanuppolicies/*
        scope: '*'
    sideEffects: None
    timeoutSeconds: 10
{{- end -}}
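
The webhook above only targets cleanup policy resources in the `kyverno.io` group at version `v1alpha1`, and the `/todo` path it points at is still a stub handler (see the server code below). A hypothetical resource that would be routed through it might look like the following; the kind name is inferred from the plural resource, and the spec is deliberately left empty because this commit does not define the policy API:

```yaml
# hypothetical example of a resource matched by the webhook rules above
apiVersion: kyverno.io/v1alpha1
kind: CleanupPolicy       # kind inferred from the "cleanuppolicies" resource; an assumption
metadata:
  name: example-cleanup
  namespace: default
spec: {}                  # policy fields intentionally omitted; not defined in this commit
```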

@@ -0,0 +1,21 @@
{{- if .Values.cleanupController.enabled -}}
apiVersion: v1
kind: Service
metadata:
  name: {{ template "kyverno.cleanup-controller.deploymentName" . }}
  labels:
    {{- include "kyverno.cleanup-controller.labels" . | nindent 4 }}
  namespace: {{ template "kyverno.namespace" . }}
spec:
  ports:
    - port: {{ .Values.cleanupController.service.port }}
      targetPort: https
      protocol: TCP
      name: https
      {{- if and (eq .Values.cleanupController.service.type "NodePort") (not (empty .Values.cleanupController.service.nodePort)) }}
      nodePort: {{ .Values.cleanupController.service.nodePort }}
      {{- end }}
  selector:
    {{- include "kyverno.cleanup-controller.matchLabels" . | nindent 4 }}
  type: {{ .Values.cleanupController.service.type }}
{{- end -}}

@@ -0,0 +1,9 @@
{{- if .Values.cleanupController.enabled -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ template "kyverno.cleanup-controller.deploymentName" . }}
  labels:
    {{- include "kyverno.cleanup-controller.labels" . | nindent 4 }}
  namespace: {{ template "kyverno.namespace" . }}
{{- end -}}

@@ -475,3 +475,35 @@ grafana:
  namespace:
  # -- Grafana dashboard configmap annotations.
  annotations: {}

cleanupController:
  # -- Enable cleanup controller.
  enabled: false
  image:
    # -- Image registry
    registry:
    # If you want to manage the registry you should remove it from the repository
    # registry: ghcr.io
    # repository: kyverno/kyverno
    # -- Image repository
    repository: ghcr.io/kyverno/cleanup-controller  # kyverno: replaced in e2e tests
    # -- Image tag
    # Defaults to appVersion in Chart.yaml if omitted
    tag:  # replaced in e2e tests
    # -- Image pull policy
    pullPolicy: IfNotPresent
    # -- Image pull secrets
    pullSecrets: []
    # - secretName
  service:
    # -- Service port.
    port: 443
    # -- Service type.
    type: ClusterIP
    # -- Service node port.
    # Only used if `service.type` is `NodePort`.
    nodePort:
    # -- Service annotations.
    annotations: {}

@@ -0,0 +1,37 @@
package main

import (
	"context"
	"reflect"
)

// TODO: eventually move this in an util package
type startable interface {
	Start(stopCh <-chan struct{})
}

type informer interface {
	startable
	WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
}

func startInformers[T startable](ctx context.Context, informers ...T) {
	for i := range informers {
		informers[i].Start(ctx.Done())
	}
}

func waitForCacheSync(ctx context.Context, informers ...informer) bool {
	ret := true
	for i := range informers {
		for _, result := range informers[i].WaitForCacheSync(ctx.Done()) {
			ret = ret && result
		}
	}
	return ret
}

func startInformersAndWaitForCacheSync(ctx context.Context, informers ...informer) bool {
	startInformers(ctx, informers...)
	return waitForCacheSync(ctx, informers...)
}

@@ -1,4 +1,110 @@
package main
func main() {
import (
	"context"
	"flag"
	"fmt"
	"os"
	"os/signal"
	"strconv"
	"syscall"
	"time"

	"github.com/go-logr/logr"
	"github.com/kyverno/kyverno/pkg/config"
	"github.com/kyverno/kyverno/pkg/logging"
	corev1 "k8s.io/api/core/v1"
	kubeinformers "k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

var (
	kubeconfig           string
	clientRateLimitQPS   float64
	clientRateLimitBurst int
	logFormat            string
)

const (
	resyncPeriod = 15 * time.Minute
)

func parseFlags() error {
	logging.Init(nil)
	flag.StringVar(&logFormat, "loggingFormat", logging.TextFormat, "This determines the output format of the logger.")
	flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
	flag.Float64Var(&clientRateLimitQPS, "clientRateLimitQPS", 20, "Configure the maximum QPS to the Kubernetes API server from Kyverno. Uses the client default if zero.")
	flag.IntVar(&clientRateLimitBurst, "clientRateLimitBurst", 50, "Configure the maximum burst for throttle. Uses the client default if zero.")
	if err := flag.Set("v", "2"); err != nil {
		return err
	}
	flag.Parse()
	return nil
}

func createKubeClients(logger logr.Logger) (*rest.Config, *kubernetes.Clientset, error) {
	logger = logger.WithName("kube-clients")
	logger.Info("create kube clients...", "kubeconfig", kubeconfig, "qps", clientRateLimitQPS, "burst", clientRateLimitBurst)
	clientConfig, err := config.CreateClientConfig(kubeconfig, clientRateLimitQPS, clientRateLimitBurst)
	if err != nil {
		return nil, nil, err
	}
	kubeClient, err := kubernetes.NewForConfig(clientConfig)
	if err != nil {
		return nil, nil, err
	}
	return clientConfig, kubeClient, nil
}

func setupSignals() (context.Context, context.CancelFunc) {
	return signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
}

func main() {
	// parse flags
	if err := parseFlags(); err != nil {
		fmt.Println("failed to parse flags", err)
		os.Exit(1)
	}
	// setup logger
	logLevel, err := strconv.Atoi(flag.Lookup("v").Value.String())
	if err != nil {
		fmt.Println("failed to setup logger", err)
		os.Exit(1)
	}
	if err := logging.Setup(logFormat, logLevel); err != nil {
		fmt.Println("failed to setup logger", err)
		os.Exit(1)
	}
	logger := logging.WithName("setup")
	// create client config and kube clients
	_, kubeClient, err := createKubeClients(logger)
	if err != nil {
		os.Exit(1)
	}
	kubeKyvernoInformer := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, resyncPeriod, kubeinformers.WithNamespace(config.KyvernoNamespace()))
	// setup signals
	signalCtx, signalCancel := setupSignals()
	defer signalCancel()
	secretLister := kubeKyvernoInformer.Core().V1().Secrets().Lister()
	server := NewServer(
		func() ([]byte, []byte, error) {
			secret, err := secretLister.Secrets(config.KyvernoNamespace()).Get("cleanup-controller-tls")
			if err != nil {
				return nil, nil, err
			}
			return secret.Data[corev1.TLSCertKey], secret.Data[corev1.TLSPrivateKeyKey], nil
		},
	)
	// start informers and wait for cache sync
	// we need to call start again because we potentially registered new informers
	if !startInformersAndWaitForCacheSync(signalCtx, kubeKyvernoInformer) {
		os.Exit(1)
	}
	// start webhooks server
	server.Run(signalCtx.Done())
	// wait for termination signal
	<-signalCtx.Done()
}

@@ -0,0 +1,88 @@
package main

import (
	"context"
	"crypto/tls"
	"net/http"
	"time"

	"github.com/go-logr/logr"
	"github.com/julienschmidt/httprouter"
	"github.com/kyverno/kyverno/pkg/logging"
	admissionutils "github.com/kyverno/kyverno/pkg/utils/admission"
	"github.com/kyverno/kyverno/pkg/webhooks/handlers"
	admissionv1 "k8s.io/api/admission/v1"
)

type Server interface {
	// Run TLS server in separate thread and returns control immediately
	Run(<-chan struct{})
	// Stop TLS server and returns control after the server is shut down
	Stop(context.Context)
}

type server struct {
	server *http.Server
}

type TlsProvider func() ([]byte, []byte, error)

func TODO(logr.Logger, *admissionv1.AdmissionRequest, time.Time) *admissionv1.AdmissionResponse {
	return admissionutils.ResponseSuccess()
}

// NewServer creates new instance of server accordingly to given configuration
func NewServer(
	tlsProvider TlsProvider,
) Server {
	mux := httprouter.New()
	mux.HandlerFunc(
		"POST",
		"/todo",
		handlers.AdmissionHandler(TODO).
			WithAdmission(logging.WithName("todo")),
	)
	return &server{
		server: &http.Server{
			Addr: ":9443",
			TLSConfig: &tls.Config{
				GetCertificate: func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
					certPem, keyPem, err := tlsProvider()
					if err != nil {
						return nil, err
					}
					pair, err := tls.X509KeyPair(certPem, keyPem)
					if err != nil {
						return nil, err
					}
					return &pair, nil
				},
				MinVersion: tls.VersionTLS12,
			},
			Handler:           mux,
			ReadTimeout:       30 * time.Second,
			WriteTimeout:      30 * time.Second,
			ReadHeaderTimeout: 30 * time.Second,
			IdleTimeout:       5 * time.Minute,
			// ErrorLog: logging.StdLogger(logger.WithName("server"), ""),
		},
	}
}

func (s *server) Run(stopCh <-chan struct{}) {
	go func() {
		if err := s.server.ListenAndServeTLS("", ""); err != nil {
			logging.Error(err, "failed to start server")
		}
	}()
}

func (s *server) Stop(ctx context.Context) {
	err := s.server.Shutdown(ctx)
	if err != nil {
		err = s.server.Close()
		if err != nil {
			logging.Error(err, "failed to start server")
		}
	}
}