Mirror of https://github.com/kyverno/kyverno.git (synced 2025-03-31 03:45:17 +00:00)
Make server ports configurable, resolves #7279

* Make server ports configurable, resolves #7279
* Switch to flags instead of env vars
* Could not use internal package in webhooks pkg
* Add helm chart changes
* make codegen-docs-all
* make codegen-manifest-all

---------

Signed-off-by: Dirk Pahl <dirk.pahl@deutschebahn.com>
Co-authored-by: Dirk Pahl <dirk.pahl@deutschebahn.com>
Co-authored-by: shuting <shuting@nirmata.com>
parent 0f20b90647
commit d8c2c5818d
12 changed files with 59 additions and 7 deletions
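In short: the commit adds three chart values, each defaulting to port 9443, and threads them through to new container flags. A minimal values override sketch (illustrative only, not part of the commit; the 10443 port is a made-up choice for a hostNetwork setup where 9443 clashes):

    admissionController:
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      webhookServer:
        port: 10443        # default 9443; rendered into --webhookServerPort

    cleanupController:
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      server:
        port: 10443        # default 9443; rendered into --cleanupServerPort
      webhookServer:
        port: 10443        # default 9443; rendered into --webhookServerPort

The flag names follow from the deployment template hunks below.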
@@ -341,6 +341,7 @@ The chart values are organised per component.
 | admissionController.apiPriorityAndFairness | bool | `false` | Change `apiPriorityAndFairness` to `true` if you want to insulate the API calls made by Kyverno admission controller activities. This will help ensure Kyverno stability in busy clusters. Ref: https://kubernetes.io/docs/concepts/cluster-administration/flow-control/ |
 | admissionController.priorityLevelConfigurationSpec | object | See [values.yaml](values.yaml) | Priority level configuration. The block is directly forwarded into the priorityLevelConfiguration, so you can use whatever specification you want. ref: https://kubernetes.io/docs/concepts/cluster-administration/flow-control/#prioritylevelconfiguration |
 | admissionController.hostNetwork | bool | `false` | Change `hostNetwork` to `true` when you want the pod to share its host's network namespace. Useful for situations like when you end up dealing with a custom CNI over Amazon EKS. Update the `dnsPolicy` accordingly as well to suit the host network mode. |
+| admissionController.webhookServer | object | `{"port":9443}` | admissionController webhook server port in case you are using hostNetwork: true, you might want to change the port the webhookServer is listening to |
 | admissionController.dnsPolicy | string | `"ClusterFirst"` | `dnsPolicy` determines the manner in which DNS resolution happens in the cluster. In case of `hostNetwork: true`, usually, the `dnsPolicy` is suitable to be `ClusterFirstWithHostNet`. For further reference: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy. |
 | admissionController.startupProbe | object | See [values.yaml](values.yaml) | Startup probe. The block is directly forwarded into the deployment, so you can use whatever startupProbes configuration you want. ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ |
 | admissionController.livenessProbe | object | See [values.yaml](values.yaml) | Liveness probe. The block is directly forwarded into the deployment, so you can use whatever livenessProbe configuration you want. ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ |
@@ -498,6 +499,8 @@ The chart values are organised per component.
 | cleanupController.updateStrategy | object | See [values.yaml](values.yaml) | Deployment update strategy. Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy |
 | cleanupController.priorityClassName | string | `""` | Optional priority class |
 | cleanupController.hostNetwork | bool | `false` | Change `hostNetwork` to `true` when you want the pod to share its host's network namespace. Useful for situations like when you end up dealing with a custom CNI over Amazon EKS. Update the `dnsPolicy` accordingly as well to suit the host network mode. |
+| cleanupController.server | object | `{"port":9443}` | cleanupController server port in case you are using hostNetwork: true, you might want to change the port the cleanupController is listening to |
+| cleanupController.webhookServer | object | `{"port":9443}` | cleanupController webhook server port in case you are using hostNetwork: true, you might want to change the port the webhookServer is listening to |
 | cleanupController.dnsPolicy | string | `"ClusterFirst"` | `dnsPolicy` determines the manner in which DNS resolution happens in the cluster. In case of `hostNetwork: true`, usually, the `dnsPolicy` is suitable to be `ClusterFirstWithHostNet`. For further reference: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy. |
 | cleanupController.extraArgs | object | `{}` | Extra arguments passed to the container on the command line |
 | cleanupController.extraEnvVars | list | `[]` | Additional container environment variables. |
@@ -132,6 +132,7 @@ spec:
 - --tlsSecretName={{ template "kyverno.admission-controller.serviceName" . }}.{{ template "kyverno.namespace" . }}.svc.kyverno-tls-pair
 - --backgroundServiceAccountName=system:serviceaccount:{{ include "kyverno.namespace" . }}:{{ include "kyverno.background-controller.serviceAccountName" . }}
 - --servicePort={{ .Values.admissionController.service.port }}
+- --webhookServerPort={{ .Values.admissionController.webhookServer.port }}
 {{- if .Values.admissionController.tracing.enabled }}
 - --enableTracing
 - --tracingAddress={{ .Values.admissionController.tracing.address }}
@@ -90,6 +90,8 @@ spec:
 - --caSecretName={{ template "kyverno.cleanup-controller.name" . }}.{{ template "kyverno.namespace" . }}.svc.kyverno-tls-ca
 - --tlsSecretName={{ template "kyverno.cleanup-controller.name" . }}.{{ template "kyverno.namespace" . }}.svc.kyverno-tls-pair
 - --servicePort={{ .Values.cleanupController.service.port }}
+- --cleanupServerPort={{ .Values.cleanupController.server.port }}
+- --webhookServerPort={{ .Values.cleanupController.webhookServer.port }}
 {{- if .Values.cleanupController.tracing.enabled }}
 - --enableTracing
 - --tracingAddress={{ .Values.cleanupController.tracing.address }}
@@ -713,6 +713,11 @@ admissionController:
   # Update the `dnsPolicy` accordingly as well to suit the host network mode.
   hostNetwork: false

+  # -- admissionController webhook server port
+  # in case you are using hostNetwork: true, you might want to change the port the webhookServer is listening to
+  webhookServer:
+    port: 9443
+
   # -- `dnsPolicy` determines the manner in which DNS resolution happens in the cluster.
   # In case of `hostNetwork: true`, usually, the `dnsPolicy` is suitable to be `ClusterFirstWithHostNet`.
   # For further reference: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy.
@@ -1343,6 +1348,15 @@ cleanupController:
   # Update the `dnsPolicy` accordingly as well to suit the host network mode.
   hostNetwork: false

+  # -- cleanupController server port
+  # in case you are using hostNetwork: true, you might want to change the port the cleanupController is listening to
+  server:
+    port: 9443
+
+  # -- cleanupController webhook server port
+  # in case you are using hostNetwork: true, you might want to change the port the webhookServer is listening to
+  webhookServer:
+    port: 9443
 # -- `dnsPolicy` determines the manner in which DNS resolution happens in the cluster.
 # In case of `hostNetwork: true`, usually, the `dnsPolicy` is suitable to be `ClusterFirstWithHostNet`.
 # For further reference: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy.
@@ -61,16 +61,18 @@ func (probes) IsLive(context.Context) bool {

 func main() {
 	var (
 		dumpPayload       bool
 		serverIP          string
 		servicePort       int
+		webhookServerPort int
 		maxQueuedEvents   int
 		interval          time.Duration
 	)
 	flagset := flag.NewFlagSet("cleanup-controller", flag.ExitOnError)
 	flagset.BoolVar(&dumpPayload, "dumpPayload", false, "Set this flag to activate/deactivate debug mode.")
 	flagset.StringVar(&serverIP, "serverIP", "", "IP address where Kyverno controller runs. Only required if out-of-cluster.")
 	flagset.IntVar(&servicePort, "servicePort", 443, "Port used by the Kyverno Service resource and for webhook configurations.")
+	flagset.IntVar(&webhookServerPort, "webhookServerPort", 9443, "Port used by the webhook server.")
 	flagset.IntVar(&maxQueuedEvents, "maxQueuedEvents", 1000, "Maximum events to be queued.")
 	flagset.DurationVar(&interval, "ttlReconciliationInterval", time.Minute, "Set this flag to set the interval after which the resource controller reconciliation should occur")
 	flagset.StringVar(&caSecretName, "caSecretName", "", "Name of the secret containing CA.")
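For readers unfamiliar with this wiring, the hunk above is the standard Go flag pattern; a minimal self-contained sketch (illustrative only, not Kyverno code) of how such a port flag ends up in the server's listen address:

    package main

    import (
    	"flag"
    	"fmt"
    	"net/http"
    )

    func main() {
    	var webhookServerPort int
    	flagset := flag.NewFlagSet("example", flag.ExitOnError)
    	flagset.IntVar(&webhookServerPort, "webhookServerPort", 9443, "Port used by the webhook server.")
    	// In real code the arguments come from os.Args[1:].
    	flagset.Parse([]string{"--webhookServerPort=10443"})

    	// The parsed value becomes the listen address, exactly as the later
    	// hunks do with fmt.Sprintf(":%d", webhookServerPort).
    	srv := &http.Server{Addr: fmt.Sprintf(":%d", webhookServerPort)}
    	fmt.Println("webhook server would listen on", srv.Addr)
    }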
@@ -194,6 +196,7 @@ func main() {
 		config.CleanupValidatingWebhookServicePath,
 		serverIP,
 		int32(servicePort),
+		int32(webhookServerPort),
 		nil,
 		[]admissionregistrationv1.RuleWithOperations{
 			{
@@ -229,6 +232,7 @@ func main() {
 		config.TtlValidatingWebhookServicePath,
 		serverIP,
 		int32(servicePort),
+		int32(webhookServerPort),
 		&metav1.LabelSelector{
 			MatchExpressions: []metav1.LabelSelectorRequirement{
 				{
@@ -8,6 +8,7 @@ import (

 	"github.com/go-logr/logr"
 	"github.com/julienschmidt/httprouter"
+	"github.com/kyverno/kyverno/cmd/internal"
 	"github.com/kyverno/kyverno/pkg/config"
 	"github.com/kyverno/kyverno/pkg/logging"
 	"github.com/kyverno/kyverno/pkg/metrics"
@@ -75,7 +76,7 @@ func NewServer(
 	mux.HandlerFunc("GET", config.ReadinessServicePath, handlers.Probe(probes.IsReady))
 	return &server{
 		server: &http.Server{
-			Addr: ":9443",
+			Addr: ":" + internal.CleanupServerPort(),
 			TLSConfig: &tls.Config{
 				GetCertificate: func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
 					certPem, keyPem, err := tlsProvider()
@@ -48,6 +48,8 @@ var (
 	registryCredentialHelpers string
 	// leader election
 	leaderElectionRetryPeriod time.Duration
+	// cleanupServerPort is the kyverno cleanup server port
+	cleanupServerPort string
 	// image verify cache
 	imageVerifyCacheEnabled bool
 	imageVerifyCacheTTLDuration time.Duration
@@ -123,6 +125,10 @@ func initLeaderElectionFlags() {
 	flag.DurationVar(&leaderElectionRetryPeriod, "leaderElectionRetryPeriod", leaderelection.DefaultRetryPeriod, "Configure leader election retry period.")
 }

+func initCleanupFlags() {
+	flag.StringVar(&cleanupServerPort, "cleanupServerPort", "9443", "kyverno cleanup server port, defaults to '9443'.")
+}
+
 type options struct {
 	clientRateLimitQPS float64
 	clientRateLimitBurst int
@@ -202,6 +208,9 @@ func initFlags(config Configuration, opts ...Option) {
 	if config.UsesLeaderElection() {
 		initLeaderElectionFlags()
 	}
+
+	initCleanupFlags()
+
 	for _, flagset := range config.FlagSets() {
 		flagset.VisitAll(func(f *flag.Flag) {
 			flag.CommandLine.Var(f.Value, f.Name, f.Usage)
@@ -234,6 +243,10 @@ func LeaderElectionRetryPeriod() time.Duration {
 	return leaderElectionRetryPeriod
 }

+func CleanupServerPort() string {
+	return cleanupServerPort
+}
+
 func printFlagSettings(logger logr.Logger) {
 	logger = logger.WithName("flag")
 	flag.VisitAll(func(f *flag.Flag) {
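The three hunks above form one pattern: a package-private flag variable, an init function that registers it, and an exported read-only accessor, which is how the cleanup server builds its address via ":" + internal.CleanupServerPort() in the earlier hunk. Per the commit message, the webhooks package could not import this internal package, which is presumably why the admission webhook server instead receives webhookServerPort as a parameter in the later hunks. A condensed sketch of the accessor pattern (illustrative package name, not the real file):

    // Package internalflags: illustrative sketch of the accessor-over-flag pattern.
    package internalflags

    import "flag"

    // cleanupServerPort stays unexported so other packages cannot mutate it.
    var cleanupServerPort string

    // initCleanupFlags registers the flag; it must run before flag.Parse().
    func initCleanupFlags() {
    	flag.StringVar(&cleanupServerPort, "cleanupServerPort", "9443", "kyverno cleanup server port, defaults to '9443'.")
    }

    // CleanupServerPort exposes the parsed value read-only.
    func CleanupServerPort() string {
    	return cleanupServerPort
    }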
@@ -116,6 +116,7 @@ func createrLeaderControllers(
 	certRenewer tls.CertRenewer,
 	runtime runtimeutils.Runtime,
 	servicePort int32,
+	webhookServerPort int32,
 	configuration config.Configuration,
 	eventGenerator event.Interface,
 ) ([]internal.Controller, func(context.Context) error, error) {
@@ -145,6 +146,7 @@ func createrLeaderControllers(
 		serverIP,
 		int32(webhookTimeout),
 		servicePort,
+		webhookServerPort,
 		autoUpdateWebhooks,
 		admissionReports,
 		runtime,
@@ -160,6 +162,7 @@ func createrLeaderControllers(
 		config.ExceptionValidatingWebhookServicePath,
 		serverIP,
 		servicePort,
+		webhookServerPort,
 		nil,
 		[]admissionregistrationv1.RuleWithOperations{{
 			Rule: admissionregistrationv1.Rule{
@@ -212,6 +215,7 @@ func main() {
 		admissionReports bool
 		dumpPayload bool
 		servicePort int
+		webhookServerPort int
 		backgroundServiceAccountName string
 	)
 	flagset := flag.NewFlagSet("kyverno", flag.ExitOnError)
@@ -227,6 +231,7 @@ func main() {
 	flagset.Func(toggle.GenerateValidatingAdmissionPolicyFlagName, toggle.GenerateValidatingAdmissionPolicyDescription, toggle.GenerateValidatingAdmissionPolicy.Parse)
 	flagset.BoolVar(&admissionReports, "admissionReports", true, "Enable or disable admission reports.")
 	flagset.IntVar(&servicePort, "servicePort", 443, "Port used by the Kyverno Service resource and for webhook configurations.")
+	flagset.IntVar(&webhookServerPort, "webhookServerPort", 9443, "Port used by the webhook server.")
 	flagset.StringVar(&backgroundServiceAccountName, "backgroundServiceAccountName", "", "Background service account name.")
 	flagset.StringVar(&caSecretName, "caSecretName", "", "Name of the secret containing CA.")
 	flagset.StringVar(&tlsSecretName, "tlsSecretName", "", "Name of the secret containing TLS pair.")
@@ -413,6 +418,7 @@ func main() {
 		certRenewer,
 		runtime,
 		int32(servicePort),
+		int32(webhookServerPort),
 		setup.Configuration,
 		eventGenerator,
 	)
@@ -512,6 +518,7 @@ func main() {
 		kubeInformer.Rbac().V1().RoleBindings().Lister(),
 		kubeInformer.Rbac().V1().ClusterRoleBindings().Lister(),
 		setup.KyvernoDynamicClient.Discovery(),
+		int32(webhookServerPort),
 	)
 	// start informers and wait for cache sync
 	// we need to call start again because we potentially registered new informers
@@ -45525,6 +45525,7 @@ spec:
 - --tlsSecretName=kyverno-svc.kyverno.svc.kyverno-tls-pair
 - --backgroundServiceAccountName=system:serviceaccount:kyverno:kyverno-background-controller
 - --servicePort=443
+- --webhookServerPort=9443
 - --disableMetrics=false
 - --otelConfig=prometheus
 - --metricsPort=8000
@@ -45779,6 +45780,8 @@ spec:
 - --caSecretName=kyverno-cleanup-controller.kyverno.svc.kyverno-tls-ca
 - --tlsSecretName=kyverno-cleanup-controller.kyverno.svc.kyverno-tls-pair
 - --servicePort=443
+- --cleanupServerPort=9443
+- --webhookServerPort=9443
 - --disableMetrics=false
 - --otelConfig=prometheus
 - --metricsPort=8000
@@ -71,6 +71,7 @@ func NewController(
 	path string,
 	server string,
 	servicePort int32,
+	webhookServerPort int32,
 	labelSelector *metav1.LabelSelector,
 	rules []admissionregistrationv1.RuleWithOperations,
 	failurePolicy *admissionregistrationv1.FailurePolicyType,
@@ -122,6 +122,7 @@ func NewController(
 	server string,
 	defaultTimeout int32,
 	servicePort int32,
+	webhookServerPort int32,
 	autoUpdateWebhooks bool,
 	admissionReports bool,
 	runtime runtimeutils.Runtime,
@@ -3,6 +3,7 @@ package webhooks
 import (
 	"context"
 	"crypto/tls"
+	"fmt"
 	"net/http"
 	"time"

@@ -82,6 +83,7 @@ func NewServer(
 	rbLister rbacv1listers.RoleBindingLister,
 	crbLister rbacv1listers.ClusterRoleBindingLister,
 	discovery dclient.IDiscovery,
+	webhookServerPort int32,
 ) Server {
 	mux := httprouter.New()
 	resourceLogger := logger.WithName("resource")
@@ -161,7 +163,7 @@ func NewServer(
 	mux.HandlerFunc("GET", config.ReadinessServicePath, handlers.Probe(runtime.IsReady))
 	return &server{
 		server: &http.Server{
-			Addr: ":9443",
+			Addr: fmt.Sprintf(":%d", webhookServerPort),
 			TLSConfig: &tls.Config{
 				GetCertificate: func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
 					certPem, keyPem, err := tlsProvider()