1
0
Fork 0
mirror of https://github.com/kyverno/kyverno.git synced 2025-03-05 07:26:55 +00:00

refactor: use context in dynamic client instead of chan (#4756)

* refactor: use context in dynamic client instead of chan

Signed-off-by: Charles-Edouard Brétéché <charled.breteche@gmail.com>

* fix

Signed-off-by: Charles-Edouard Brétéché <charled.breteche@gmail.com>

Signed-off-by: Charles-Edouard Brétéché <charled.breteche@gmail.com>
This commit is contained in:
Charles-Edouard Brétéché 2022-09-30 10:12:21 +02:00 committed by GitHub
parent f40a3bc8f5
commit c42851a37a
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
6 changed files with 11 additions and 10 deletions

View file

@ -668,7 +668,7 @@ kind-deploy-kyverno: kind-load-all ## Build images, load them in kind cluster an
--set image.tag=$(IMAGE_TAG_DEV) \
--set initImage.repository=$(LOCAL_KYVERNOPRE_IMAGE) \
--set initImage.tag=$(IMAGE_TAG_DEV) \
--set "extraArgs={--autogenInternals=true,--loggingFormat=json}"
--set "extraArgs={--autogenInternals=true,--loggingFormat=text}"
@echo Restart kyverno pods... >&2
@kubectl rollout restart deployment -n kyverno kyverno

View file

@ -1,6 +1,7 @@
package apply
import (
"context"
"fmt"
"os"
"path/filepath"
@ -199,7 +200,7 @@ func (c *ApplyCommandConfig) applyCommandHelper() (rc *common.ResultCounts, reso
if err != nil {
return rc, resources, skipInvalidPolicies, pvInfos, err
}
dClient, err = dclient.NewClient(restConfig, kubeClient, nil, 15*time.Minute, make(chan struct{}))
dClient, err = dclient.NewClient(context.Background(), restConfig, kubeClient, nil, 15*time.Minute)
if err != nil {
return rc, resources, skipInvalidPolicies, pvInfos, err
}

View file

@ -82,7 +82,7 @@ func main() {
// DYNAMIC CLIENT
// - client for all registered resources
client, err := dclient.NewClient(clientConfig, kubeClient, nil, 15*time.Minute, stopCh)
client, err := dclient.NewClient(signalCtx, clientConfig, kubeClient, nil, 15*time.Minute)
if err != nil {
setupLog.Error(err, "Failed to create client")
os.Exit(1)
@ -102,7 +102,6 @@ func main() {
requests := []request{
{policyReportKind},
{clusterPolicyReportKind},
{convertGenerateRequest},
}
@ -139,7 +138,7 @@ func main() {
os.Exit(1)
}
// use pipline to pass request to cleanup resources
// use pipeline to pass request to cleanup resources
in := gen(done, stopCh, requests...)
// process requests
// processing routine count : 2

View file

@ -204,7 +204,7 @@ func main() {
logger.Error(err, "Failed to create client")
os.Exit(1)
}
dynamicClient, err := dclient.NewClient(clientConfig, kubeClient, metricsConfig, metadataResyncPeriod, stopCh)
dynamicClient, err := dclient.NewClient(signalCtx, clientConfig, kubeClient, metricsConfig, metadataResyncPeriod)
if err != nil {
logger.Error(err, "Failed to create dynamic client")
os.Exit(1)

View file

@ -63,7 +63,7 @@ type client struct {
}
// NewClient creates a new instance of client
func NewClient(config *rest.Config, kclient *kubernetes.Clientset, metricsConfig metrics.MetricsConfigManager, resync time.Duration, stopCh <-chan struct{}) (Interface, error) {
func NewClient(ctx context.Context, config *rest.Config, kclient *kubernetes.Clientset, metricsConfig metrics.MetricsConfigManager, resync time.Duration) (Interface, error) {
dclient, err := dynamic.NewForConfig(config)
if err != nil {
return nil, err
@ -88,7 +88,7 @@ func NewClient(config *rest.Config, kclient *kubernetes.Clientset, metricsConfig
// we will be invalidating the local cache, so the next request gets a fresh cache
// If a resource is removed and the cache is not invalidated yet, we will not detect the removal
// but the re-sync shall re-evaluate
go discoveryClient.Poll(resync, stopCh)
go discoveryClient.Poll(ctx, resync)
client.SetDiscovery(discoveryClient)
return &client, nil
}

View file

@ -1,6 +1,7 @@
package dclient
import (
"context"
"fmt"
"strings"
"time"
@ -40,7 +41,7 @@ func (c serverPreferredResources) DiscoveryInterface() discovery.DiscoveryInterf
}
// Poll will keep invalidating the local cache
func (c serverPreferredResources) Poll(resync time.Duration, stopCh <-chan struct{}) {
func (c serverPreferredResources) Poll(ctx context.Context, resync time.Duration) {
logger := logger.WithName("Poll")
// start a ticker
ticker := time.NewTicker(resync)
@ -48,7 +49,7 @@ func (c serverPreferredResources) Poll(resync time.Duration, stopCh <-chan struc
logger.V(4).Info("starting registered resources sync", "period", resync)
for {
select {
case <-stopCh:
case <-ctx.Done():
logger.Info("stopping registered resources sync")
return
case <-ticker.C: