
Make server ports configurable, resolves #7279 (#7728)

* Make server ports configurable, resolves #7279

Signed-off-by: Dirk Pahl <dirk.pahl@deutschebahn.com>

* Make server ports configurable, resolves #7279

Signed-off-by: Dirk Pahl <dirk.pahl@deutschebahn.com>

* Switch to flags instead of env vars

Signed-off-by: Dirk Pahl <dirk.pahl@deutschebahn.com>

* Could not use internal package in webhooks pkg

Signed-off-by: Dirk Pahl <dirk.pahl@deutschebahn.com>

* Add helm chart changes

Signed-off-by: Dirk Pahl <dirk.pahl@deutschebahn.com>

* make codegen-docs-all

Signed-off-by: Dirk Pahl <dirk.pahl@deutschebahn.com>

* make codegen-manifest-all

Signed-off-by: Dirk Pahl <dirk.pahl@deutschebahn.com>

---------

Signed-off-by: Dirk Pahl <dirk.pahl@deutschebahn.com>
Co-authored-by: Dirk Pahl <dirk.pahl@deutschebahn.com>
Co-authored-by: shuting <shuting@nirmata.com>
Dirk Pahl authored 2023-11-17 15:19:53 +01:00, committed by GitHub
parent 0f20b90647
commit d8c2c5818d
12 changed files with 59 additions and 7 deletions
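
Taken together, the new Helm values can be exercised with a minimal override like the following. This is an illustrative sketch: the keys and their 9443 defaults come from this change, while the hostNetwork setup and the concrete port numbers are arbitrary examples.

admissionController:
  hostNetwork: true
  dnsPolicy: ClusterFirstWithHostNet
  webhookServer:
    port: 10350  # illustrative non-default port for the admission webhook server
cleanupController:
  hostNetwork: true
  dnsPolicy: ClusterFirstWithHostNet
  server:
    port: 10351  # illustrative non-default port for the cleanup server
  webhookServer:
    port: 10352  # illustrative non-default port for the cleanup webhook server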

@@ -341,6 +341,7 @@ The chart values are organised per component.
| admissionController.apiPriorityAndFairness | bool | `false` | Change `apiPriorityAndFairness` to `true` if you want to insulate the API calls made by Kyverno admission controller activities. This will help ensure Kyverno stability in busy clusters. Ref: https://kubernetes.io/docs/concepts/cluster-administration/flow-control/ |
| admissionController.priorityLevelConfigurationSpec | object | See [values.yaml](values.yaml) | Priority level configuration. The block is directly forwarded into the priorityLevelConfiguration, so you can use whatever specification you want. ref: https://kubernetes.io/docs/concepts/cluster-administration/flow-control/#prioritylevelconfiguration |
| admissionController.hostNetwork | bool | `false` | Change `hostNetwork` to `true` when you want the pod to share its host's network namespace. Useful for situations like when you end up dealing with a custom CNI over Amazon EKS. Update the `dnsPolicy` accordingly as well to suit the host network mode. |
| admissionController.webhookServer | object | `{"port":9443}` | admissionController webhook server port. In case you are using `hostNetwork: true`, you might want to change the port the webhook server listens on. |
| admissionController.dnsPolicy | string | `"ClusterFirst"` | `dnsPolicy` determines the manner in which DNS resolution happens in the cluster. In case of `hostNetwork: true`, usually, the `dnsPolicy` is suitable to be `ClusterFirstWithHostNet`. For further reference: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy. |
| admissionController.startupProbe | object | See [values.yaml](values.yaml) | Startup probe. The block is directly forwarded into the deployment, so you can use whatever startupProbes configuration you want. ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ |
| admissionController.livenessProbe | object | See [values.yaml](values.yaml) | Liveness probe. The block is directly forwarded into the deployment, so you can use whatever livenessProbe configuration you want. ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ |
@@ -498,6 +499,8 @@ The chart values are organised per component.
| cleanupController.updateStrategy | object | See [values.yaml](values.yaml) | Deployment update strategy. Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy |
| cleanupController.priorityClassName | string | `""` | Optional priority class |
| cleanupController.hostNetwork | bool | `false` | Change `hostNetwork` to `true` when you want the pod to share its host's network namespace. Useful for situations like when you end up dealing with a custom CNI over Amazon EKS. Update the `dnsPolicy` accordingly as well to suit the host network mode. |
| cleanupController.server | object | `{"port":9443}` | cleanupController server port. In case you are using `hostNetwork: true`, you might want to change the port the cleanupController listens on. |
| cleanupController.webhookServer | object | `{"port":9443}` | cleanupController webhook server port. In case you are using `hostNetwork: true`, you might want to change the port the webhook server listens on. |
| cleanupController.dnsPolicy | string | `"ClusterFirst"` | `dnsPolicy` determines the manner in which DNS resolution happens in the cluster. In case of `hostNetwork: true`, usually, the `dnsPolicy` is suitable to be `ClusterFirstWithHostNet`. For further reference: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy. |
| cleanupController.extraArgs | object | `{}` | Extra arguments passed to the container on the command line |
| cleanupController.extraEnvVars | list | `[]` | Additional container environment variables. |

@@ -132,6 +132,7 @@ spec:
- --tlsSecretName={{ template "kyverno.admission-controller.serviceName" . }}.{{ template "kyverno.namespace" . }}.svc.kyverno-tls-pair
- --backgroundServiceAccountName=system:serviceaccount:{{ include "kyverno.namespace" . }}:{{ include "kyverno.background-controller.serviceAccountName" . }}
- --servicePort={{ .Values.admissionController.service.port }}
- --webhookServerPort={{ .Values.admissionController.webhookServer.port }}
{{- if .Values.admissionController.tracing.enabled }}
- --enableTracing
- --tracingAddress={{ .Values.admissionController.tracing.address }}

@@ -90,6 +90,8 @@ spec:
- --caSecretName={{ template "kyverno.cleanup-controller.name" . }}.{{ template "kyverno.namespace" . }}.svc.kyverno-tls-ca
- --tlsSecretName={{ template "kyverno.cleanup-controller.name" . }}.{{ template "kyverno.namespace" . }}.svc.kyverno-tls-pair
- --servicePort={{ .Values.cleanupController.service.port }}
- --cleanupServerPort={{ .Values.cleanupController.server.port }}
- --webhookServerPort={{ .Values.cleanupController.webhookServer.port }}
{{- if .Values.cleanupController.tracing.enabled }}
- --enableTracing
- --tracingAddress={{ .Values.cleanupController.tracing.address }}

@@ -713,6 +713,11 @@ admissionController:
# Update the `dnsPolicy` accordingly as well to suit the host network mode.
hostNetwork: false
# -- admissionController webhook server port.
# In case you are using `hostNetwork: true`, you might want to change the port the webhook server listens on.
webhookServer:
port: 9443
# -- `dnsPolicy` determines the manner in which DNS resolution happens in the cluster.
# In case of `hostNetwork: true`, usually, the `dnsPolicy` is suitable to be `ClusterFirstWithHostNet`.
# For further reference: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy.
@@ -1343,6 +1348,15 @@ cleanupController:
# Update the `dnsPolicy` accordingly as well to suit the host network mode.
hostNetwork: false
# -- cleanupController server port.
# In case you are using `hostNetwork: true`, you might want to change the port the cleanupController listens on.
server:
port: 9443
# -- cleanupController webhook server port.
# In case you are using `hostNetwork: true`, you might want to change the port the webhook server listens on.
webhookServer:
port: 9443
# -- `dnsPolicy` determines the manner in which DNS resolution happens in the cluster.
# In case of `hostNetwork: true`, usually, the `dnsPolicy` is suitable to be `ClusterFirstWithHostNet`.
# For further reference: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy.

@@ -61,16 +61,18 @@ func (probes) IsLive(context.Context) bool {
func main() {
var (
- dumpPayload bool
- serverIP string
- servicePort int
- maxQueuedEvents int
- interval time.Duration
+ dumpPayload bool
+ serverIP string
+ servicePort int
+ webhookServerPort int
+ maxQueuedEvents int
+ interval time.Duration
)
flagset := flag.NewFlagSet("cleanup-controller", flag.ExitOnError)
flagset.BoolVar(&dumpPayload, "dumpPayload", false, "Set this flag to activate/deactivate debug mode.")
flagset.StringVar(&serverIP, "serverIP", "", "IP address where Kyverno controller runs. Only required if out-of-cluster.")
flagset.IntVar(&servicePort, "servicePort", 443, "Port used by the Kyverno Service resource and for webhook configurations.")
flagset.IntVar(&webhookServerPort, "webhookServerPort", 9443, "Port used by the webhook server.")
flagset.IntVar(&maxQueuedEvents, "maxQueuedEvents", 1000, "Maximum events to be queued.")
flagset.DurationVar(&interval, "ttlReconciliationInterval", time.Minute, "Set this flag to set the interval after which the resource controller reconciliation should occur")
flagset.StringVar(&caSecretName, "caSecretName", "", "Name of the secret containing CA.")
@@ -194,6 +196,7 @@ func main() {
config.CleanupValidatingWebhookServicePath,
serverIP,
int32(servicePort),
int32(webhookServerPort),
nil,
[]admissionregistrationv1.RuleWithOperations{
{
@@ -229,6 +232,7 @@ func main() {
config.TtlValidatingWebhookServicePath,
serverIP,
int32(servicePort),
int32(webhookServerPort),
&metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
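
The flag wiring above is conventional Go plumbing from flagset.IntVar into a listen address. As a self-contained sketch of the same pattern (a hypothetical stand-alone program, not Kyverno code; only the flag name and its 9443 default mirror the diff):

package main

import (
	"flag"
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Register the port flag with the same 9443 default as webhookServerPort above.
	var webhookServerPort int
	flagset := flag.NewFlagSet("demo", flag.ExitOnError)
	flagset.IntVar(&webhookServerPort, "webhookServerPort", 9443, "Port used by the webhook server.")
	if err := flagset.Parse([]string{"--webhookServerPort=10350"}); err != nil {
		log.Fatal(err)
	}
	// The listen address is derived from the flag instead of a hard-coded ":9443".
	srv := &http.Server{Addr: fmt.Sprintf(":%d", webhookServerPort)}
	log.Printf("webhook server would listen on %s", srv.Addr)
}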

@@ -8,6 +8,7 @@ import (
"github.com/go-logr/logr"
"github.com/julienschmidt/httprouter"
"github.com/kyverno/kyverno/cmd/internal"
"github.com/kyverno/kyverno/pkg/config"
"github.com/kyverno/kyverno/pkg/logging"
"github.com/kyverno/kyverno/pkg/metrics"
@@ -75,7 +76,7 @@ func NewServer(
mux.HandlerFunc("GET", config.ReadinessServicePath, handlers.Probe(probes.IsReady))
return &server{
server: &http.Server{
- Addr: ":9443",
+ Addr: ":" + internal.CleanupServerPort(),
TLSConfig: &tls.Config{
GetCertificate: func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
certPem, keyPem, err := tlsProvider()

@@ -48,6 +48,8 @@ var (
registryCredentialHelpers string
// leader election
leaderElectionRetryPeriod time.Duration
// cleanupServerPort is the kyverno cleanup server port
cleanupServerPort string
// image verify cache
imageVerifyCacheEnabled bool
imageVerifyCacheTTLDuration time.Duration
@@ -123,6 +125,10 @@ func initLeaderElectionFlags() {
flag.DurationVar(&leaderElectionRetryPeriod, "leaderElectionRetryPeriod", leaderelection.DefaultRetryPeriod, "Configure leader election retry period.")
}
func initCleanupFlags() {
flag.StringVar(&cleanupServerPort, "cleanupServerPort", "9443", "kyverno cleanup server port, defaults to '9443'.")
}
type options struct {
clientRateLimitQPS float64
clientRateLimitBurst int
@@ -202,6 +208,9 @@ func initFlags(config Configuration, opts ...Option) {
if config.UsesLeaderElection() {
initLeaderElectionFlags()
}
initCleanupFlags()
for _, flagset := range config.FlagSets() {
flagset.VisitAll(func(f *flag.Flag) {
flag.CommandLine.Var(f.Value, f.Name, f.Usage)
@@ -234,6 +243,10 @@ func LeaderElectionRetryPeriod() time.Duration {
return leaderElectionRetryPeriod
}
func CleanupServerPort() string {
return cleanupServerPort
}
func printFlagSettings(logger logr.Logger) {
logger = logger.WithName("flag")
flag.VisitAll(func(f *flag.Flag) {
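
The cmd/internal changes above follow a package-level flag variable plus exported accessor pattern. A minimal sketch of that shape (the package and registration function names are hypothetical; only the cleanupServerPort flag and the CleanupServerPort accessor mirror the diff):

// Package portflags is a hypothetical reduction of the pattern above.
package portflags

import "flag"

// cleanupServerPort is the kyverno cleanup server port.
var cleanupServerPort string

// registerCleanupFlags wires the variable to the command line; the real code
// calls its equivalent (initCleanupFlags) from initFlags before parsing.
func registerCleanupFlags() {
	flag.StringVar(&cleanupServerPort, "cleanupServerPort", "9443", "kyverno cleanup server port, defaults to '9443'.")
}

// CleanupServerPort returns the parsed value, ready to be concatenated into a
// listen address such as ":" + CleanupServerPort().
func CleanupServerPort() string {
	return cleanupServerPort
}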

@@ -116,6 +116,7 @@ func createrLeaderControllers(
certRenewer tls.CertRenewer,
runtime runtimeutils.Runtime,
servicePort int32,
webhookServerPort int32,
configuration config.Configuration,
eventGenerator event.Interface,
) ([]internal.Controller, func(context.Context) error, error) {
@@ -145,6 +146,7 @@
serverIP,
int32(webhookTimeout),
servicePort,
webhookServerPort,
autoUpdateWebhooks,
admissionReports,
runtime,
@@ -160,6 +162,7 @@
config.ExceptionValidatingWebhookServicePath,
serverIP,
servicePort,
webhookServerPort,
nil,
[]admissionregistrationv1.RuleWithOperations{{
Rule: admissionregistrationv1.Rule{
@@ -212,6 +215,7 @@ func main() {
admissionReports bool
dumpPayload bool
servicePort int
webhookServerPort int
backgroundServiceAccountName string
)
flagset := flag.NewFlagSet("kyverno", flag.ExitOnError)
@@ -227,6 +231,7 @@ func main() {
flagset.Func(toggle.GenerateValidatingAdmissionPolicyFlagName, toggle.GenerateValidatingAdmissionPolicyDescription, toggle.GenerateValidatingAdmissionPolicy.Parse)
flagset.BoolVar(&admissionReports, "admissionReports", true, "Enable or disable admission reports.")
flagset.IntVar(&servicePort, "servicePort", 443, "Port used by the Kyverno Service resource and for webhook configurations.")
flagset.IntVar(&webhookServerPort, "webhookServerPort", 9443, "Port used by the webhook server.")
flagset.StringVar(&backgroundServiceAccountName, "backgroundServiceAccountName", "", "Background service account name.")
flagset.StringVar(&caSecretName, "caSecretName", "", "Name of the secret containing CA.")
flagset.StringVar(&tlsSecretName, "tlsSecretName", "", "Name of the secret containing TLS pair.")
@@ -413,6 +418,7 @@ func main() {
certRenewer,
runtime,
int32(servicePort),
int32(webhookServerPort),
setup.Configuration,
eventGenerator,
)
@@ -512,6 +518,7 @@ func main() {
kubeInformer.Rbac().V1().RoleBindings().Lister(),
kubeInformer.Rbac().V1().ClusterRoleBindings().Lister(),
setup.KyvernoDynamicClient.Discovery(),
int32(webhookServerPort),
)
// start informers and wait for cache sync
// we need to call start again because we potentially registered new informers
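
For context on why the port is threaded into the webhook configuration controllers at all: a Kubernetes webhook client config can carry an explicit port, so registration code may need to know which port the server actually listens on. A hedged sketch using the upstream admissionregistration/v1 types (the helper is hypothetical, not Kyverno's controller code; whether Kyverno wires the port in exactly this way is not shown in this diff):

package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
)

// buildClientConfig is a hypothetical helper showing where a configurable
// webhook server port can surface in a webhook registration.
func buildClientConfig(namespace, name, path string, port int32) admissionregistrationv1.WebhookClientConfig {
	return admissionregistrationv1.WebhookClientConfig{
		Service: &admissionregistrationv1.ServiceReference{
			Namespace: namespace,
			Name:      name,
			Path:      &path,
			Port:      &port, // previously an implicit constant, now caller-supplied
		},
	}
}

func main() {
	cfg := buildClientConfig("kyverno", "kyverno-svc", "/validate", 9443)
	fmt.Printf("webhook client config targets port %d\n", *cfg.Service.Port)
}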

@@ -45525,6 +45525,7 @@ spec:
- --tlsSecretName=kyverno-svc.kyverno.svc.kyverno-tls-pair
- --backgroundServiceAccountName=system:serviceaccount:kyverno:kyverno-background-controller
- --servicePort=443
- --webhookServerPort=9443
- --disableMetrics=false
- --otelConfig=prometheus
- --metricsPort=8000
@@ -45779,6 +45780,8 @@ spec:
- --caSecretName=kyverno-cleanup-controller.kyverno.svc.kyverno-tls-ca
- --tlsSecretName=kyverno-cleanup-controller.kyverno.svc.kyverno-tls-pair
- --servicePort=443
- --cleanupServerPort=9443
- --webhookServerPort=9443
- --disableMetrics=false
- --otelConfig=prometheus
- --metricsPort=8000

@@ -71,6 +71,7 @@ func NewController(
path string,
server string,
servicePort int32,
webhookServerPort int32,
labelSelector *metav1.LabelSelector,
rules []admissionregistrationv1.RuleWithOperations,
failurePolicy *admissionregistrationv1.FailurePolicyType,

@@ -122,6 +122,7 @@ func NewController(
server string,
defaultTimeout int32,
servicePort int32,
webhookServerPort int32,
autoUpdateWebhooks bool,
admissionReports bool,
runtime runtimeutils.Runtime,

@@ -3,6 +3,7 @@ package webhooks
import (
"context"
"crypto/tls"
"fmt"
"net/http"
"time"
@@ -82,6 +83,7 @@ func NewServer(
rbLister rbacv1listers.RoleBindingLister,
crbLister rbacv1listers.ClusterRoleBindingLister,
discovery dclient.IDiscovery,
webhookServerPort int32,
) Server {
mux := httprouter.New()
resourceLogger := logger.WithName("resource")
@@ -161,7 +163,7 @@ func NewServer(
mux.HandlerFunc("GET", config.ReadinessServicePath, handlers.Probe(runtime.IsReady))
return &server{
server: &http.Server{
- Addr: ":9443",
+ Addr: fmt.Sprintf(":%d", webhookServerPort),
TLSConfig: &tls.Config{
GetCertificate: func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
certPem, keyPem, err := tlsProvider()
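
Note the design choice flagged in the commit message ("Could not use internal package in webhooks pkg"): instead of reading the flag via cmd/internal, pkg/webhooks receives the resolved port as a NewServer parameter. A minimal sketch of that injection style (hypothetical package, not the actual server constructor):

// Package webhookserver is a hypothetical reduction of the injection style:
// the caller owns flag parsing, so only the resolved port value crosses the
// package boundary and no cmd/internal import is needed here.
package webhookserver

import (
	"fmt"
	"net/http"
)

// NewServer builds the server from a caller-supplied port instead of the
// previously hard-coded ":9443".
func NewServer(webhookServerPort int32) *http.Server {
	return &http.Server{Addr: fmt.Sprintf(":%d", webhookServerPort)}
}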