Mirror of https://github.com/arangodb/kube-arangodb.git (synced 2024-12-14 11:57:37 +00:00)

[Feature] [Platform] Storage V2 (#1750)

Parent: 4b2f20d1e6
Commit: 4462669547
43 changed files with 1273 additions and 451 deletions
@@ -53,6 +53,14 @@ linters-settings:
        pkg: github.com/arangodb/kube-arangodb/integrations/shutdown/v1
      - alias: pbShutdownV1
        pkg: github.com/arangodb/kube-arangodb/integrations/shutdown/v1/definition
      - alias: pbImplStorageV2
        pkg: github.com/arangodb/kube-arangodb/integrations/storage/v2
      - alias: pbImplStorageV2Shared
        pkg: github.com/arangodb/kube-arangodb/integrations/storage/v2/shared
      - alias: pbImplStorageV2SharedS3
        pkg: github.com/arangodb/kube-arangodb/integrations/storage/v2/shared/s3
      - alias: pbStorageV2
        pkg: github.com/arangodb/kube-arangodb/integrations/storage/v2/definition
      - alias: analyticsApi
        pkg: github.com/arangodb/kube-arangodb/pkg/apis/analytics/v1alpha1
      - alias: mlApiv1alpha1
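
For illustration, the `pbStorageV2` alias enforced above maps to an import of the Storage V2 definition package. A minimal sketch; the `upload` helper is hypothetical, while `Send` is the helper added to the definition package in this commit:

```go
package example

import (
	"context"
	"io"

	pbStorageV2 "github.com/arangodb/kube-arangodb/integrations/storage/v2/definition"
)

// upload is a hypothetical caller showing the enforced import alias in use.
func upload(ctx context.Context, client pbStorageV2.StorageV2Client, key string, in io.Reader) error {
	_, err := pbStorageV2.Send(ctx, client, key, in)
	return err
}
```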
@@ -61,6 +69,8 @@ linters-settings:
        pkg: github.com/arangodb/kube-arangodb/pkg/apis/ml/v1beta1
      - alias: networkingApi
        pkg: github.com/arangodb/kube-arangodb/pkg/apis/networking/v1alpha1
      - alias: platformApi
        pkg: github.com/arangodb/kube-arangodb/pkg/apis/platform/v1alpha1
      - alias: schedulerApiv1alpha1
        pkg: github.com/arangodb/kube-arangodb/pkg/apis/scheduler/v1alpha1
      - alias: schedulerContainerApiv1alpha1
@@ -18,6 +18,7 @@
- (Feature) (Networking) ArangoRoute WebSocket Support
- (Feature) (Scheduler) Helm Driver Param
- (Feature) (Integration) Services Endpoint
- (Feature) (Platform) Storage

## [1.2.43](https://github.com/arangodb/kube-arangodb/tree/1.2.43) (2024-10-14)
- (Feature) ArangoRoute CRD
@@ -195,7 +195,7 @@ Flags:
      --kubernetes.max-batch-size int            Size of batch during objects read (default 256)
      --kubernetes.qps float32                   Number of queries per second for k8s API (default 15)
      --log.format string                        Set log format. Allowed values: 'pretty', 'JSON'. If empty, default format is used (default "pretty")
--log.level stringArray Set log levels in format <level> or <logger>=<level>. Possible loggers: action, agency, api-server, assertion, backup-operator, chaos-monkey, crd, deployment, deployment-ci, deployment-reconcile, deployment-replication, deployment-resilience, deployment-resources, deployment-storage, deployment-storage-pc, deployment-storage-service, generic-parent-operator, helm, http, inspector, integration-config-v1, integration-envoy-auth-v3, integration-scheduler-v2, integrations, k8s-client, kubernetes-informer, monitor, networking-route-operator, operator, operator-arangojob-handler, operator-v2, operator-v2-event, operator-v2-worker, panics, platform-storage-operator, pod_compare, root, root-event-recorder, scheduler-batchjob-operator, scheduler-cronjob-operator, scheduler-deployment-operator, scheduler-pod-operator, scheduler-profile-operator, server, server-authentication (default [info])
--log.level stringArray Set log levels in format <level> or <logger>=<level>. Possible loggers: action, agency, api-server, assertion, backup-operator, chaos-monkey, crd, deployment, deployment-ci, deployment-reconcile, deployment-replication, deployment-resilience, deployment-resources, deployment-storage, deployment-storage-pc, deployment-storage-service, generic-parent-operator, helm, http, inspector, integration-config-v1, integration-envoy-auth-v3, integration-scheduler-v2, integration-storage-v2, integrations, k8s-client, kubernetes-informer, monitor, networking-route-operator, operator, operator-arangojob-handler, operator-v2, operator-v2-event, operator-v2-worker, panics, platform-storage-operator, pod_compare, root, root-event-recorder, scheduler-batchjob-operator, scheduler-cronjob-operator, scheduler-deployment-operator, scheduler-pod-operator, scheduler-profile-operator, server, server-authentication (default [info])
      --log.sampling                             If true, operator will try to minimize duplication of logging events (default true)
      --memory-limit uint                        Define memory limit for hard shutdown and the dump of goroutines. Used for testing
      --metrics.excluded-prefixes stringArray    List of the excluded metrics prefixes
@@ -116,11 +116,3 @@ Region defines the availability zone name.

Default Value: `""`

***

### .spec.deployment

Type: `string` <sup>[\[ref\]](https://github.com/arangodb/kube-arangodb/blob/1.2.43/pkg/apis/platform/v1alpha1/storage_spec.go#L29)</sup>

Deployment specifies the ArangoDeployment object name
@@ -80,7 +80,7 @@ Flags:
      --kubernetes.max-batch-size int            Size of batch during objects read (default 256)
      --kubernetes.qps float32                   Number of queries per second for k8s API (default 15)
      --log.format string                        Set log format. Allowed values: 'pretty', 'JSON'. If empty, default format is used (default "pretty")
--log.level stringArray Set log levels in format <level> or <logger>=<level>. Possible loggers: action, agency, api-server, assertion, backup-operator, chaos-monkey, crd, deployment, deployment-ci, deployment-reconcile, deployment-replication, deployment-resilience, deployment-resources, deployment-storage, deployment-storage-pc, deployment-storage-service, generic-parent-operator, helm, http, inspector, integration-config-v1, integration-envoy-auth-v3, integration-scheduler-v2, integrations, k8s-client, kubernetes-informer, monitor, networking-route-operator, operator, operator-arangojob-handler, operator-v2, operator-v2-event, operator-v2-worker, panics, platform-storage-operator, pod_compare, root, root-event-recorder, scheduler-batchjob-operator, scheduler-cronjob-operator, scheduler-deployment-operator, scheduler-pod-operator, scheduler-profile-operator, server, server-authentication (default [info])
--log.level stringArray Set log levels in format <level> or <logger>=<level>. Possible loggers: action, agency, api-server, assertion, backup-operator, chaos-monkey, crd, deployment, deployment-ci, deployment-reconcile, deployment-replication, deployment-resilience, deployment-resources, deployment-storage, deployment-storage-pc, deployment-storage-service, generic-parent-operator, helm, http, inspector, integration-config-v1, integration-envoy-auth-v3, integration-scheduler-v2, integration-storage-v2, integrations, k8s-client, kubernetes-informer, monitor, networking-route-operator, operator, operator-arangojob-handler, operator-v2, operator-v2-event, operator-v2-worker, panics, platform-storage-operator, pod_compare, root, root-event-recorder, scheduler-batchjob-operator, scheduler-cronjob-operator, scheduler-deployment-operator, scheduler-pod-operator, scheduler-profile-operator, server, server-authentication (default [info])
      --log.sampling                             If true, operator will try to minimize duplication of logging events (default true)
      --memory-limit uint                        Define memory limit for hard shutdown and the dump of goroutines. Used for testing
      --metrics.excluded-prefixes stringArray    List of the excluded metrics prefixes
@@ -18,70 +18,71 @@ Available Commands:
help Help about any command
|
||||
|
||||
Flags:
|
||||
--health.address string Address to expose health service (Env: HEALTH_ADDRESS) (default "0.0.0.0:9091")
|
||||
--health.auth.token string Token for health service (when auth service is token) (Env: HEALTH_AUTH_TOKEN)
|
||||
--health.auth.type string Auth type for health service (Env: HEALTH_AUTH_TYPE) (default "None")
|
||||
--health.shutdown.enabled Determines if shutdown service should be enabled and exposed (Env: HEALTH_SHUTDOWN_ENABLED) (default true)
|
||||
--health.tls.keyfile string Path to the keyfile (Env: HEALTH_TLS_KEYFILE)
|
||||
-h, --help help for arangodb_operator_integration
|
||||
--integration.authentication.v1 Enable AuthenticationV1 Integration Service (Env: INTEGRATION_AUTHENTICATION_V1)
|
||||
--integration.authentication.v1.enabled Defines if Authentication is enabled (Env: INTEGRATION_AUTHENTICATION_V1_ENABLED) (default true)
|
||||
--integration.authentication.v1.external Defones if External access to service authentication.v1 is enabled (Env: INTEGRATION_AUTHENTICATION_V1_EXTERNAL)
|
||||
--integration.authentication.v1.internal Defones if Internal access to service authentication.v1 is enabled (Env: INTEGRATION_AUTHENTICATION_V1_INTERNAL) (default true)
|
||||
--integration.authentication.v1.path string Path to the JWT Folder (Env: INTEGRATION_AUTHENTICATION_V1_PATH)
|
||||
--integration.authentication.v1.token.allowed strings Allowed users for the Token (Env: INTEGRATION_AUTHENTICATION_V1_TOKEN_ALLOWED)
|
||||
--integration.authentication.v1.token.max-size uint16 Max Token max size in bytes (Env: INTEGRATION_AUTHENTICATION_V1_TOKEN_MAX_SIZE) (default 64)
|
||||
--integration.authentication.v1.token.ttl.default duration Default Token TTL (Env: INTEGRATION_AUTHENTICATION_V1_TOKEN_TTL_DEFAULT) (default 1h0m0s)
|
||||
--integration.authentication.v1.token.ttl.max duration Max Token TTL (Env: INTEGRATION_AUTHENTICATION_V1_TOKEN_TTL_MAX) (default 1h0m0s)
|
||||
--integration.authentication.v1.token.ttl.min duration Min Token TTL (Env: INTEGRATION_AUTHENTICATION_V1_TOKEN_TTL_MIN) (default 1m0s)
|
||||
--integration.authentication.v1.token.user string Default user of the Token (Env: INTEGRATION_AUTHENTICATION_V1_TOKEN_USER) (default "root")
|
||||
--integration.authentication.v1.ttl duration TTL of the JWT cache (Env: INTEGRATION_AUTHENTICATION_V1_TTL) (default 15s)
|
||||
--integration.authorization.v0 Enable AuthorizationV0 Integration Service (Env: INTEGRATION_AUTHORIZATION_V0)
|
||||
--integration.authorization.v0.external Defones if External access to service authorization.v0 is enabled (Env: INTEGRATION_AUTHORIZATION_V0_EXTERNAL)
|
||||
--integration.authorization.v0.internal Defones if Internal access to service authorization.v0 is enabled (Env: INTEGRATION_AUTHORIZATION_V0_INTERNAL) (default true)
|
||||
--integration.config.v1 Enable ConfigV1 Integration Service (Env: INTEGRATION_CONFIG_V1)
|
||||
--integration.config.v1.external Defones if External access to service config.v1 is enabled (Env: INTEGRATION_CONFIG_V1_EXTERNAL)
|
||||
--integration.config.v1.internal Defones if Internal access to service config.v1 is enabled (Env: INTEGRATION_CONFIG_V1_INTERNAL) (default true)
|
||||
--integration.config.v1.module strings Module in the reference <name>=<abs path> (Env: INTEGRATION_CONFIG_V1_MODULE)
|
||||
--integration.envoy.auth.v3 Enable EnvoyAuthV3 Integration Service (Env: INTEGRATION_ENVOY_AUTH_V3)
|
||||
--integration.envoy.auth.v3.external Defones if External access to service envoy.auth.v3 is enabled (Env: INTEGRATION_ENVOY_AUTH_V3_EXTERNAL)
|
||||
--integration.envoy.auth.v3.internal Defones if Internal access to service envoy.auth.v3 is enabled (Env: INTEGRATION_ENVOY_AUTH_V3_INTERNAL) (default true)
|
||||
--integration.scheduler.v1 SchedulerV1 Integration (Env: INTEGRATION_SCHEDULER_V1)
|
||||
--integration.scheduler.v1.external Defones if External access to service scheduler.v1 is enabled (Env: INTEGRATION_SCHEDULER_V1_EXTERNAL)
|
||||
--integration.scheduler.v1.internal Defones if Internal access to service scheduler.v1 is enabled (Env: INTEGRATION_SCHEDULER_V1_INTERNAL) (default true)
|
||||
--integration.scheduler.v1.namespace string Kubernetes Namespace (Env: INTEGRATION_SCHEDULER_V1_NAMESPACE) (default "default")
|
||||
--integration.scheduler.v1.verify-access Verify the CRD Access (Env: INTEGRATION_SCHEDULER_V1_VERIFY_ACCESS) (default true)
|
||||
--integration.scheduler.v2 SchedulerV2 Integration (Env: INTEGRATION_SCHEDULER_V2)
|
||||
--integration.scheduler.v2.deployment string ArangoDeployment Name (Env: INTEGRATION_SCHEDULER_V2_DEPLOYMENT)
|
||||
--integration.scheduler.v2.external Defones if External access to service scheduler.v2 is enabled (Env: INTEGRATION_SCHEDULER_V2_EXTERNAL)
|
||||
--integration.scheduler.v2.internal Defones if Internal access to service scheduler.v2 is enabled (Env: INTEGRATION_SCHEDULER_V2_INTERNAL) (default true)
|
||||
--integration.scheduler.v2.namespace string Kubernetes Namespace (Env: INTEGRATION_SCHEDULER_V2_NAMESPACE) (default "default")
|
||||
--integration.shutdown.v1 ShutdownV1 Handler (Env: INTEGRATION_SHUTDOWN_V1)
|
||||
--integration.shutdown.v1.external Defones if External access to service shutdown.v1 is enabled (Env: INTEGRATION_SHUTDOWN_V1_EXTERNAL)
|
||||
--integration.shutdown.v1.internal Defones if Internal access to service shutdown.v1 is enabled (Env: INTEGRATION_SHUTDOWN_V1_INTERNAL) (default true)
|
||||
--integration.storage.v1 StorageBucket Integration (Env: INTEGRATION_STORAGE_V1)
|
||||
--integration.storage.v1.external Defones if External access to service storage.v1 is enabled (Env: INTEGRATION_STORAGE_V1_EXTERNAL)
|
||||
--integration.storage.v1.internal Defones if Internal access to service storage.v1 is enabled (Env: INTEGRATION_STORAGE_V1_INTERNAL) (default true)
|
||||
--integration.storage.v1.s3.access-key string Path to file containing S3 AccessKey (Env: INTEGRATION_STORAGE_V1_S3_ACCESS_KEY)
|
||||
--integration.storage.v1.s3.allow-insecure If set to true, the Endpoint certificates won't be checked (Env: INTEGRATION_STORAGE_V1_S3_ALLOW_INSECURE)
|
||||
--integration.storage.v1.s3.bucket string Bucket name (Env: INTEGRATION_STORAGE_V1_S3_BUCKET)
|
||||
--integration.storage.v1.s3.ca-crt string Path to file containing CA certificate to validate endpoint connection (Env: INTEGRATION_STORAGE_V1_S3_CA_CRT)
|
||||
--integration.storage.v1.s3.ca-key string Path to file containing keyfile to validate endpoint connection (Env: INTEGRATION_STORAGE_V1_S3_CA_KEY)
|
||||
--integration.storage.v1.s3.disable-ssl If set to true, the SSL won't be used when connecting to Endpoint (Env: INTEGRATION_STORAGE_V1_S3_DISABLE_SSL)
|
||||
--integration.storage.v1.s3.endpoint string Endpoint of S3 API implementation (Env: INTEGRATION_STORAGE_V1_S3_ENDPOINT)
|
||||
--integration.storage.v1.s3.region string Region (Env: INTEGRATION_STORAGE_V1_S3_REGION)
|
||||
--integration.storage.v1.s3.secret-key string Path to file containing S3 SecretKey (Env: INTEGRATION_STORAGE_V1_S3_SECRET_KEY)
|
||||
--integration.storage.v1.type string Type of the Storage Integration (Env: INTEGRATION_STORAGE_V1_TYPE) (default "s3")
|
||||
--services.address string Address to expose internal services (Env: SERVICES_ADDRESS) (default "127.0.0.1:9092")
|
||||
--services.auth.token string Token for internal service (when auth service is token) (Env: SERVICES_AUTH_TOKEN)
|
||||
--services.auth.type string Auth type for internal service (Env: SERVICES_AUTH_TYPE) (default "None")
|
||||
--services.enabled Defines if internal access is enabled (Env: SERVICES_ENABLED) (default true)
|
||||
--services.external.address string Address to expose external services (Env: SERVICES_EXTERNAL_ADDRESS) (default "0.0.0.0:9093")
|
||||
--services.external.auth.token string Token for external service (when auth service is token) (Env: SERVICES_EXTERNAL_AUTH_TOKEN)
|
||||
--services.external.auth.type string Auth type for external service (Env: SERVICES_EXTERNAL_AUTH_TYPE) (default "None")
|
||||
--services.external.enabled Defines if external access is enabled (Env: SERVICES_EXTERNAL_ENABLED)
|
||||
--services.external.tls.keyfile string Path to the keyfile (Env: SERVICES_EXTERNAL_TLS_KEYFILE)
|
||||
--services.tls.keyfile string Path to the keyfile (Env: SERVICES_TLS_KEYFILE)
|
||||
--health.address string Address to expose health service (Env: HEALTH_ADDRESS) (default "0.0.0.0:9091")
|
||||
--health.auth.token string Token for health service (when auth service is token) (Env: HEALTH_AUTH_TOKEN)
|
||||
--health.auth.type string Auth type for health service (Env: HEALTH_AUTH_TYPE) (default "None")
|
||||
--health.shutdown.enabled Determines if shutdown service should be enabled and exposed (Env: HEALTH_SHUTDOWN_ENABLED) (default true)
|
||||
--health.tls.keyfile string Path to the keyfile (Env: HEALTH_TLS_KEYFILE)
|
||||
-h, --help help for arangodb_operator_integration
|
||||
--integration.authentication.v1 Enable AuthenticationV1 Integration Service (Env: INTEGRATION_AUTHENTICATION_V1)
|
||||
--integration.authentication.v1.enabled Defines if Authentication is enabled (Env: INTEGRATION_AUTHENTICATION_V1_ENABLED) (default true)
|
||||
--integration.authentication.v1.external Defones if External access to service authentication.v1 is enabled (Env: INTEGRATION_AUTHENTICATION_V1_EXTERNAL)
|
||||
--integration.authentication.v1.internal Defones if Internal access to service authentication.v1 is enabled (Env: INTEGRATION_AUTHENTICATION_V1_INTERNAL) (default true)
|
||||
--integration.authentication.v1.path string Path to the JWT Folder (Env: INTEGRATION_AUTHENTICATION_V1_PATH)
|
||||
--integration.authentication.v1.token.allowed strings Allowed users for the Token (Env: INTEGRATION_AUTHENTICATION_V1_TOKEN_ALLOWED)
|
||||
--integration.authentication.v1.token.max-size uint16 Max Token max size in bytes (Env: INTEGRATION_AUTHENTICATION_V1_TOKEN_MAX_SIZE) (default 64)
|
||||
--integration.authentication.v1.token.ttl.default duration Default Token TTL (Env: INTEGRATION_AUTHENTICATION_V1_TOKEN_TTL_DEFAULT) (default 1h0m0s)
|
||||
--integration.authentication.v1.token.ttl.max duration Max Token TTL (Env: INTEGRATION_AUTHENTICATION_V1_TOKEN_TTL_MAX) (default 1h0m0s)
|
||||
--integration.authentication.v1.token.ttl.min duration Min Token TTL (Env: INTEGRATION_AUTHENTICATION_V1_TOKEN_TTL_MIN) (default 1m0s)
|
||||
--integration.authentication.v1.token.user string Default user of the Token (Env: INTEGRATION_AUTHENTICATION_V1_TOKEN_USER) (default "root")
|
||||
--integration.authentication.v1.ttl duration TTL of the JWT cache (Env: INTEGRATION_AUTHENTICATION_V1_TTL) (default 15s)
|
||||
--integration.authorization.v0 Enable AuthorizationV0 Integration Service (Env: INTEGRATION_AUTHORIZATION_V0)
|
||||
--integration.authorization.v0.external Defones if External access to service authorization.v0 is enabled (Env: INTEGRATION_AUTHORIZATION_V0_EXTERNAL)
|
||||
--integration.authorization.v0.internal Defones if Internal access to service authorization.v0 is enabled (Env: INTEGRATION_AUTHORIZATION_V0_INTERNAL) (default true)
|
||||
--integration.config.v1 Enable ConfigV1 Integration Service (Env: INTEGRATION_CONFIG_V1)
|
||||
--integration.config.v1.external Defones if External access to service config.v1 is enabled (Env: INTEGRATION_CONFIG_V1_EXTERNAL)
|
||||
--integration.config.v1.internal Defones if Internal access to service config.v1 is enabled (Env: INTEGRATION_CONFIG_V1_INTERNAL) (default true)
|
||||
--integration.config.v1.module strings Module in the reference <name>=<abs path> (Env: INTEGRATION_CONFIG_V1_MODULE)
|
||||
--integration.envoy.auth.v3 Enable EnvoyAuthV3 Integration Service (Env: INTEGRATION_ENVOY_AUTH_V3)
|
||||
--integration.envoy.auth.v3.external Defones if External access to service envoy.auth.v3 is enabled (Env: INTEGRATION_ENVOY_AUTH_V3_EXTERNAL)
|
||||
--integration.envoy.auth.v3.internal Defones if Internal access to service envoy.auth.v3 is enabled (Env: INTEGRATION_ENVOY_AUTH_V3_INTERNAL) (default true)
|
||||
--integration.scheduler.v1 SchedulerV1 Integration (Env: INTEGRATION_SCHEDULER_V1)
|
||||
--integration.scheduler.v1.external Defones if External access to service scheduler.v1 is enabled (Env: INTEGRATION_SCHEDULER_V1_EXTERNAL)
|
||||
--integration.scheduler.v1.internal Defones if Internal access to service scheduler.v1 is enabled (Env: INTEGRATION_SCHEDULER_V1_INTERNAL) (default true)
|
||||
--integration.scheduler.v1.namespace string Kubernetes Namespace (Env: INTEGRATION_SCHEDULER_V1_NAMESPACE) (default "default")
|
||||
--integration.scheduler.v1.verify-access Verify the CRD Access (Env: INTEGRATION_SCHEDULER_V1_VERIFY_ACCESS) (default true)
|
||||
--integration.scheduler.v2 SchedulerV2 Integration (Env: INTEGRATION_SCHEDULER_V2)
|
||||
--integration.scheduler.v2.deployment string ArangoDeployment Name (Env: INTEGRATION_SCHEDULER_V2_DEPLOYMENT)
|
||||
--integration.scheduler.v2.external Defones if External access to service scheduler.v2 is enabled (Env: INTEGRATION_SCHEDULER_V2_EXTERNAL)
|
||||
--integration.scheduler.v2.internal Defones if Internal access to service scheduler.v2 is enabled (Env: INTEGRATION_SCHEDULER_V2_INTERNAL) (default true)
|
||||
--integration.scheduler.v2.namespace string Kubernetes Namespace (Env: INTEGRATION_SCHEDULER_V2_NAMESPACE) (default "default")
|
||||
--integration.shutdown.v1 ShutdownV1 Handler (Env: INTEGRATION_SHUTDOWN_V1)
|
||||
--integration.shutdown.v1.external Defones if External access to service shutdown.v1 is enabled (Env: INTEGRATION_SHUTDOWN_V1_EXTERNAL)
|
||||
--integration.shutdown.v1.internal Defones if Internal access to service shutdown.v1 is enabled (Env: INTEGRATION_SHUTDOWN_V1_INTERNAL) (default true)
|
||||
      --integration.storage.v2                                      StorageBucket V2 Integration (Env: INTEGRATION_STORAGE_V2)
      --integration.storage.v2.external                             Defones if External access to service storage.v2 is enabled (Env: INTEGRATION_STORAGE_V2_EXTERNAL)
      --integration.storage.v2.internal                             Defones if Internal access to service storage.v2 is enabled (Env: INTEGRATION_STORAGE_V2_INTERNAL) (default true)
      --integration.storage.v2.s3.allow-insecure                    If set to true, the Endpoint certificates won't be checked (Env: INTEGRATION_STORAGE_V2_S3_ALLOW_INSECURE)
      --integration.storage.v2.s3.bucket.name string                Bucket name (Env: INTEGRATION_STORAGE_V2_S3_BUCKET_NAME)
      --integration.storage.v2.s3.bucket.prefix string              Bucket Prefix (Env: INTEGRATION_STORAGE_V2_S3_BUCKET_PREFIX)
      --integration.storage.v2.s3.ca strings                        Path to file containing CA certificate to validate endpoint connection (Env: INTEGRATION_STORAGE_V2_S3_CA)
      --integration.storage.v2.s3.disable-ssl                       If set to true, the SSL won't be used when connecting to Endpoint (Env: INTEGRATION_STORAGE_V2_S3_DISABLE_SSL)
      --integration.storage.v2.s3.endpoint string                   Endpoint of S3 API implementation (Env: INTEGRATION_STORAGE_V2_S3_ENDPOINT)
      --integration.storage.v2.s3.provider.file.access-key string   Path to file containing S3 AccessKey (Env: INTEGRATION_STORAGE_V2_S3_PROVIDER_FILE_ACCESS_KEY)
      --integration.storage.v2.s3.provider.file.secret-key string   Path to file containing S3 SecretKey (Env: INTEGRATION_STORAGE_V2_S3_PROVIDER_FILE_SECRET_KEY)
      --integration.storage.v2.s3.provider.type string              S3 Credentials Provider type (Env: INTEGRATION_STORAGE_V2_S3_PROVIDER_TYPE) (default "file")
      --integration.storage.v2.s3.region string                     Region (Env: INTEGRATION_STORAGE_V2_S3_REGION)
      --integration.storage.v2.type string                          Type of the Storage Integration (Env: INTEGRATION_STORAGE_V2_TYPE) (default "s3")
--services.address string Address to expose internal services (Env: SERVICES_ADDRESS) (default "127.0.0.1:9092")
|
||||
--services.auth.token string Token for internal service (when auth service is token) (Env: SERVICES_AUTH_TOKEN)
|
||||
--services.auth.type string Auth type for internal service (Env: SERVICES_AUTH_TYPE) (default "None")
|
||||
--services.enabled Defines if internal access is enabled (Env: SERVICES_ENABLED) (default true)
|
||||
--services.external.address string Address to expose external services (Env: SERVICES_EXTERNAL_ADDRESS) (default "0.0.0.0:9093")
|
||||
--services.external.auth.token string Token for external service (when auth service is token) (Env: SERVICES_EXTERNAL_AUTH_TOKEN)
|
||||
--services.external.auth.type string Auth type for external service (Env: SERVICES_EXTERNAL_AUTH_TYPE) (default "None")
|
||||
--services.external.enabled Defines if external access is enabled (Env: SERVICES_EXTERNAL_ENABLED)
|
||||
--services.external.tls.keyfile string Path to the keyfile (Env: SERVICES_EXTERNAL_TLS_KEYFILE)
|
||||
--services.tls.keyfile string Path to the keyfile (Env: SERVICES_TLS_KEYFILE)
|
||||
|
||||
Use "arangodb_operator_integration [command] --help" for more information about a command.
|
||||
```
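
For context, a minimal sketch of a consumer of the new Storage V2 integration: it dials the internal services endpoint listed above (default `127.0.0.1:9092`), assumes `SERVICES_AUTH_TYPE` is left at `None`, and calls the `Init` RPC of the `StorageV2` service through the client generated in the definition package. The address and the decision to create missing resources are illustrative:

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	pbStorageV2 "github.com/arangodb/kube-arangodb/integrations/storage/v2/definition"
)

func main() {
	// Connect to the internal services endpoint of arangodb_operator_integration.
	conn, err := grpc.NewClient("127.0.0.1:9092", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := pbStorageV2.NewStorageV2Client(conn)

	// Ask the integration to create required resources (e.g. the bucket) if they are missing.
	create := true
	if _, err := client.Init(context.Background(), &pbStorageV2.StorageV2InitRequest{Create: &create}); err != nil {
		log.Fatal(err)
	}
}
```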
integrations/storage/v2/definition/helpers.go (new file, 109 lines)
@@ -0,0 +1,109 @@
//
// DISCLAIMER
//
// Copyright 2024 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//

package definition

import (
	"context"
	"io"

	"github.com/arangodb/kube-arangodb/pkg/util"
	"github.com/arangodb/kube-arangodb/pkg/util/errors"
)

const BufferSize = 4094

func Send(ctx context.Context, client StorageV2Client, key string, in io.Reader) (*StorageV2WriteObjectResponse, error) {
	cache := make([]byte, BufferSize)

	wr, err := client.WriteObject(ctx)
	if err != nil {
		return nil, err
	}

	for {
		n, err := in.Read(cache)
		if err != nil {
			if errors.Is(err, io.EOF) {
				break
			}

			if cerr := wr.CloseSend(); cerr != nil {
				return nil, errors.Errors(err, cerr)
			}

			return nil, err
		}

		if err := wr.Send(&StorageV2WriteObjectRequest{
			Path: &StorageV2Path{
				Path: key,
			},
			Chunk: cache[:n],
		}); err != nil {
			if cerr := wr.CloseSend(); cerr != nil {
				return nil, errors.Errors(err, cerr)
			}

			return nil, err
		}
	}

	return wr.CloseAndRecv()
}

func Receive(ctx context.Context, client StorageV2Client, key string, out io.Writer) (int, error) {
	wr, err := client.ReadObject(ctx, &StorageV2ReadObjectRequest{
		Path: &StorageV2Path{Path: key},
	})
	if err != nil {
		return 0, err
	}

	var bytes int

	for {
		resp, err := wr.Recv()
		if err != nil {
			if errors.Is(err, io.EOF) {
				break
			}

			if cerr := wr.CloseSend(); cerr != nil {
				return 0, errors.Errors(err, cerr)
			}

			return 0, err
		}

		n, err := util.WriteAll(out, resp.GetChunk())
		if err != nil {
			if cerr := wr.CloseSend(); cerr != nil {
				return 0, errors.Errors(err, cerr)
			}

			return 0, err
		}

		bytes += n
	}

	return bytes, nil
}
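
A short usage sketch for the Send and Receive helpers above, assuming an already-connected `StorageV2Client` (for example one created with the generated `NewStorageV2Client`); the object key and payload are illustrative:

```go
package example

import (
	"bytes"
	"context"
	"fmt"

	pbStorageV2 "github.com/arangodb/kube-arangodb/integrations/storage/v2/definition"
)

// roundTrip uploads a payload and reads it back through the Storage V2 service.
func roundTrip(ctx context.Context, client pbStorageV2.StorageV2Client) error {
	// Send streams the reader to the WriteObject RPC in BufferSize chunks.
	resp, err := pbStorageV2.Send(ctx, client, "examples/demo.txt", bytes.NewReader([]byte("example payload")))
	if err != nil {
		return err
	}
	fmt.Printf("uploaded %d bytes (checksum %s)\n", resp.GetBytes(), resp.GetChecksum())

	// Receive streams the object back from the ReadObject RPC into the writer.
	var out bytes.Buffer
	n, err := pbStorageV2.Receive(ctx, client, "examples/demo.txt", &out)
	if err != nil {
		return err
	}
	fmt.Printf("downloaded %d bytes\n", n)
	return nil
}
```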
@ -206,6 +206,94 @@ func (x *StorageV2ObjectInfo) GetLastUpdated() *timestamppb.Timestamp {
|
|||
return nil
|
||||
}
|
||||
|
||||
// StorageV2 Init Request
|
||||
type StorageV2InitRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// Defines if required resources should be created
|
||||
Create *bool `protobuf:"varint,1,opt,name=create,proto3,oneof" json:"create,omitempty"`
|
||||
}
|
||||
|
||||
func (x *StorageV2InitRequest) Reset() {
|
||||
*x = StorageV2InitRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *StorageV2InitRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*StorageV2InitRequest) ProtoMessage() {}
|
||||
|
||||
func (x *StorageV2InitRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[3]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use StorageV2InitRequest.ProtoReflect.Descriptor instead.
|
||||
func (*StorageV2InitRequest) Descriptor() ([]byte, []int) {
|
||||
return file_integrations_storage_v2_definition_storage_proto_rawDescGZIP(), []int{3}
|
||||
}
|
||||
|
||||
func (x *StorageV2InitRequest) GetCreate() bool {
|
||||
if x != nil && x.Create != nil {
|
||||
return *x.Create
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// StorageV2 Init Response
|
||||
type StorageV2InitResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
}
|
||||
|
||||
func (x *StorageV2InitResponse) Reset() {
|
||||
*x = StorageV2InitResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[4]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *StorageV2InitResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*StorageV2InitResponse) ProtoMessage() {}
|
||||
|
||||
func (x *StorageV2InitResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[4]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use StorageV2InitResponse.ProtoReflect.Descriptor instead.
|
||||
func (*StorageV2InitResponse) Descriptor() ([]byte, []int) {
|
||||
return file_integrations_storage_v2_definition_storage_proto_rawDescGZIP(), []int{4}
|
||||
}
|
||||
|
||||
// StorageV2 ReadObject Request
|
||||
type StorageV2ReadObjectRequest struct {
|
||||
state protoimpl.MessageState
|
||||
|
@ -219,7 +307,7 @@ type StorageV2ReadObjectRequest struct {
|
|||
func (x *StorageV2ReadObjectRequest) Reset() {
|
||||
*x = StorageV2ReadObjectRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[3]
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[5]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
@ -232,7 +320,7 @@ func (x *StorageV2ReadObjectRequest) String() string {
|
|||
func (*StorageV2ReadObjectRequest) ProtoMessage() {}
|
||||
|
||||
func (x *StorageV2ReadObjectRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[3]
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[5]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
@ -245,7 +333,7 @@ func (x *StorageV2ReadObjectRequest) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use StorageV2ReadObjectRequest.ProtoReflect.Descriptor instead.
|
||||
func (*StorageV2ReadObjectRequest) Descriptor() ([]byte, []int) {
|
||||
return file_integrations_storage_v2_definition_storage_proto_rawDescGZIP(), []int{3}
|
||||
return file_integrations_storage_v2_definition_storage_proto_rawDescGZIP(), []int{5}
|
||||
}
|
||||
|
||||
func (x *StorageV2ReadObjectRequest) GetPath() *StorageV2Path {
|
||||
|
@ -268,7 +356,7 @@ type StorageV2ReadObjectResponse struct {
|
|||
func (x *StorageV2ReadObjectResponse) Reset() {
|
||||
*x = StorageV2ReadObjectResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[4]
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[6]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
@ -281,7 +369,7 @@ func (x *StorageV2ReadObjectResponse) String() string {
|
|||
func (*StorageV2ReadObjectResponse) ProtoMessage() {}
|
||||
|
||||
func (x *StorageV2ReadObjectResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[4]
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[6]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
@ -294,7 +382,7 @@ func (x *StorageV2ReadObjectResponse) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use StorageV2ReadObjectResponse.ProtoReflect.Descriptor instead.
|
||||
func (*StorageV2ReadObjectResponse) Descriptor() ([]byte, []int) {
|
||||
return file_integrations_storage_v2_definition_storage_proto_rawDescGZIP(), []int{4}
|
||||
return file_integrations_storage_v2_definition_storage_proto_rawDescGZIP(), []int{6}
|
||||
}
|
||||
|
||||
func (x *StorageV2ReadObjectResponse) GetChunk() []byte {
|
||||
|
@ -319,7 +407,7 @@ type StorageV2WriteObjectRequest struct {
|
|||
func (x *StorageV2WriteObjectRequest) Reset() {
|
||||
*x = StorageV2WriteObjectRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[5]
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[7]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
@ -332,7 +420,7 @@ func (x *StorageV2WriteObjectRequest) String() string {
|
|||
func (*StorageV2WriteObjectRequest) ProtoMessage() {}
|
||||
|
||||
func (x *StorageV2WriteObjectRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[5]
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[7]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
@ -345,7 +433,7 @@ func (x *StorageV2WriteObjectRequest) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use StorageV2WriteObjectRequest.ProtoReflect.Descriptor instead.
|
||||
func (*StorageV2WriteObjectRequest) Descriptor() ([]byte, []int) {
|
||||
return file_integrations_storage_v2_definition_storage_proto_rawDescGZIP(), []int{5}
|
||||
return file_integrations_storage_v2_definition_storage_proto_rawDescGZIP(), []int{7}
|
||||
}
|
||||
|
||||
func (x *StorageV2WriteObjectRequest) GetPath() *StorageV2Path {
|
||||
|
@ -377,7 +465,7 @@ type StorageV2WriteObjectResponse struct {
|
|||
func (x *StorageV2WriteObjectResponse) Reset() {
|
||||
*x = StorageV2WriteObjectResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[6]
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[8]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
@ -390,7 +478,7 @@ func (x *StorageV2WriteObjectResponse) String() string {
|
|||
func (*StorageV2WriteObjectResponse) ProtoMessage() {}
|
||||
|
||||
func (x *StorageV2WriteObjectResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[6]
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[8]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
@ -403,7 +491,7 @@ func (x *StorageV2WriteObjectResponse) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use StorageV2WriteObjectResponse.ProtoReflect.Descriptor instead.
|
||||
func (*StorageV2WriteObjectResponse) Descriptor() ([]byte, []int) {
|
||||
return file_integrations_storage_v2_definition_storage_proto_rawDescGZIP(), []int{6}
|
||||
return file_integrations_storage_v2_definition_storage_proto_rawDescGZIP(), []int{8}
|
||||
}
|
||||
|
||||
func (x *StorageV2WriteObjectResponse) GetBytes() int64 {
|
||||
|
@ -433,7 +521,7 @@ type StorageV2HeadObjectRequest struct {
|
|||
func (x *StorageV2HeadObjectRequest) Reset() {
|
||||
*x = StorageV2HeadObjectRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[7]
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[9]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
@ -446,7 +534,7 @@ func (x *StorageV2HeadObjectRequest) String() string {
|
|||
func (*StorageV2HeadObjectRequest) ProtoMessage() {}
|
||||
|
||||
func (x *StorageV2HeadObjectRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[7]
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[9]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
@ -459,7 +547,7 @@ func (x *StorageV2HeadObjectRequest) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use StorageV2HeadObjectRequest.ProtoReflect.Descriptor instead.
|
||||
func (*StorageV2HeadObjectRequest) Descriptor() ([]byte, []int) {
|
||||
return file_integrations_storage_v2_definition_storage_proto_rawDescGZIP(), []int{7}
|
||||
return file_integrations_storage_v2_definition_storage_proto_rawDescGZIP(), []int{9}
|
||||
}
|
||||
|
||||
func (x *StorageV2HeadObjectRequest) GetPath() *StorageV2Path {
|
||||
|
@ -482,7 +570,7 @@ type StorageV2HeadObjectResponse struct {
|
|||
func (x *StorageV2HeadObjectResponse) Reset() {
|
||||
*x = StorageV2HeadObjectResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[8]
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[10]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
@ -495,7 +583,7 @@ func (x *StorageV2HeadObjectResponse) String() string {
|
|||
func (*StorageV2HeadObjectResponse) ProtoMessage() {}
|
||||
|
||||
func (x *StorageV2HeadObjectResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[8]
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[10]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
@ -508,7 +596,7 @@ func (x *StorageV2HeadObjectResponse) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use StorageV2HeadObjectResponse.ProtoReflect.Descriptor instead.
|
||||
func (*StorageV2HeadObjectResponse) Descriptor() ([]byte, []int) {
|
||||
return file_integrations_storage_v2_definition_storage_proto_rawDescGZIP(), []int{8}
|
||||
return file_integrations_storage_v2_definition_storage_proto_rawDescGZIP(), []int{10}
|
||||
}
|
||||
|
||||
func (x *StorageV2HeadObjectResponse) GetInfo() *StorageV2ObjectInfo {
|
||||
|
@ -531,7 +619,7 @@ type StorageV2DeleteObjectRequest struct {
|
|||
func (x *StorageV2DeleteObjectRequest) Reset() {
|
||||
*x = StorageV2DeleteObjectRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[9]
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[11]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
@ -544,7 +632,7 @@ func (x *StorageV2DeleteObjectRequest) String() string {
|
|||
func (*StorageV2DeleteObjectRequest) ProtoMessage() {}
|
||||
|
||||
func (x *StorageV2DeleteObjectRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[9]
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[11]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
@ -557,7 +645,7 @@ func (x *StorageV2DeleteObjectRequest) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use StorageV2DeleteObjectRequest.ProtoReflect.Descriptor instead.
|
||||
func (*StorageV2DeleteObjectRequest) Descriptor() ([]byte, []int) {
|
||||
return file_integrations_storage_v2_definition_storage_proto_rawDescGZIP(), []int{9}
|
||||
return file_integrations_storage_v2_definition_storage_proto_rawDescGZIP(), []int{11}
|
||||
}
|
||||
|
||||
func (x *StorageV2DeleteObjectRequest) GetPath() *StorageV2Path {
|
||||
|
@ -577,7 +665,7 @@ type StorageV2DeleteObjectResponse struct {
|
|||
func (x *StorageV2DeleteObjectResponse) Reset() {
|
||||
*x = StorageV2DeleteObjectResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[10]
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[12]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
@ -590,7 +678,7 @@ func (x *StorageV2DeleteObjectResponse) String() string {
|
|||
func (*StorageV2DeleteObjectResponse) ProtoMessage() {}
|
||||
|
||||
func (x *StorageV2DeleteObjectResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[10]
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[12]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
@ -603,7 +691,7 @@ func (x *StorageV2DeleteObjectResponse) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use StorageV2DeleteObjectResponse.ProtoReflect.Descriptor instead.
|
||||
func (*StorageV2DeleteObjectResponse) Descriptor() ([]byte, []int) {
|
||||
return file_integrations_storage_v2_definition_storage_proto_rawDescGZIP(), []int{10}
|
||||
return file_integrations_storage_v2_definition_storage_proto_rawDescGZIP(), []int{12}
|
||||
}
|
||||
|
||||
// StorageV2 ListObjects Request
|
||||
|
@ -619,7 +707,7 @@ type StorageV2ListObjectsRequest struct {
|
|||
func (x *StorageV2ListObjectsRequest) Reset() {
|
||||
*x = StorageV2ListObjectsRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[11]
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[13]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
@ -632,7 +720,7 @@ func (x *StorageV2ListObjectsRequest) String() string {
|
|||
func (*StorageV2ListObjectsRequest) ProtoMessage() {}
|
||||
|
||||
func (x *StorageV2ListObjectsRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[11]
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[13]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
@ -645,7 +733,7 @@ func (x *StorageV2ListObjectsRequest) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use StorageV2ListObjectsRequest.ProtoReflect.Descriptor instead.
|
||||
func (*StorageV2ListObjectsRequest) Descriptor() ([]byte, []int) {
|
||||
return file_integrations_storage_v2_definition_storage_proto_rawDescGZIP(), []int{11}
|
||||
return file_integrations_storage_v2_definition_storage_proto_rawDescGZIP(), []int{13}
|
||||
}
|
||||
|
||||
func (x *StorageV2ListObjectsRequest) GetPath() *StorageV2Path {
|
||||
|
@ -668,7 +756,7 @@ type StorageV2ListObjectsResponse struct {
|
|||
func (x *StorageV2ListObjectsResponse) Reset() {
|
||||
*x = StorageV2ListObjectsResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[12]
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[14]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
@ -681,7 +769,7 @@ func (x *StorageV2ListObjectsResponse) String() string {
|
|||
func (*StorageV2ListObjectsResponse) ProtoMessage() {}
|
||||
|
||||
func (x *StorageV2ListObjectsResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[12]
|
||||
mi := &file_integrations_storage_v2_definition_storage_proto_msgTypes[14]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
@ -694,7 +782,7 @@ func (x *StorageV2ListObjectsResponse) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use StorageV2ListObjectsResponse.ProtoReflect.Descriptor instead.
|
||||
func (*StorageV2ListObjectsResponse) Descriptor() ([]byte, []int) {
|
||||
return file_integrations_storage_v2_definition_storage_proto_rawDescGZIP(), []int{12}
|
||||
return file_integrations_storage_v2_definition_storage_proto_rawDescGZIP(), []int{14}
|
||||
}
|
||||
|
||||
func (x *StorageV2ListObjectsResponse) GetFiles() []*StorageV2Object {
|
||||
|
@ -729,88 +817,98 @@ var file_integrations_storage_v2_definition_storage_proto_rawDesc = []byte{
|
|||
0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
|
||||
0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x22,
|
||||
0x49, 0x0a, 0x1a, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x32, 0x52, 0x65, 0x61, 0x64,
|
||||
0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a,
|
||||
0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x73, 0x68,
|
||||
0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x32,
|
||||
0x50, 0x61, 0x74, 0x68, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x33, 0x0a, 0x1b, 0x53, 0x74,
|
||||
0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x32, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63,
|
||||
0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x68, 0x75,
|
||||
0x6e, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x22,
|
||||
0x60, 0x0a, 0x1b, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x32, 0x57, 0x72, 0x69, 0x74,
|
||||
0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b,
|
||||
0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x73,
|
||||
0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56,
|
||||
0x32, 0x50, 0x61, 0x74, 0x68, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x14, 0x0a, 0x05, 0x63,
|
||||
0x68, 0x75, 0x6e, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x63, 0x68, 0x75, 0x6e,
|
||||
0x6b, 0x22, 0x50, 0x0a, 0x1c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x32, 0x57, 0x72,
|
||||
0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
|
||||
0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
|
||||
0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x68, 0x65, 0x63, 0x6b,
|
||||
0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x68, 0x65, 0x63, 0x6b,
|
||||
0x73, 0x75, 0x6d, 0x22, 0x49, 0x0a, 0x1a, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x32,
|
||||
0x48, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
|
||||
0x74, 0x12, 0x2b, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
|
||||
0x17, 0x2e, 0x73, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61,
|
||||
0x67, 0x65, 0x56, 0x32, 0x50, 0x61, 0x74, 0x68, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x50,
|
||||
0x0a, 0x1b, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x32, 0x48, 0x65, 0x61, 0x64, 0x4f,
|
||||
0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a,
|
||||
0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x73, 0x68,
|
||||
0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x32,
|
||||
0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f,
|
||||
0x22, 0x4b, 0x0a, 0x1c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x32, 0x44, 0x65, 0x6c,
|
||||
0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
|
||||
0x12, 0x2b, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17,
|
||||
0x2e, 0x73, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67,
|
||||
0x65, 0x56, 0x32, 0x50, 0x61, 0x74, 0x68, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x1f, 0x0a,
|
||||
0x1d, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x32, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
|
||||
0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4a,
|
||||
0x0a, 0x1b, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x32, 0x4c, 0x69, 0x73, 0x74, 0x4f,
|
||||
0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a,
|
||||
0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x73, 0x68,
|
||||
0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x32,
|
||||
0x50, 0x61, 0x74, 0x68, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x4f, 0x0a, 0x1c, 0x53, 0x74,
|
||||
0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x32, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63,
|
||||
0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x05, 0x66, 0x69,
|
||||
0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x68, 0x75, 0x74,
|
||||
0x64, 0x6f, 0x77, 0x6e, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x32, 0x4f, 0x62,
|
||||
0x6a, 0x65, 0x63, 0x74, 0x52, 0x05, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x32, 0xe4, 0x03, 0x0a, 0x09,
|
||||
0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x32, 0x12, 0x5b, 0x0a, 0x0a, 0x52, 0x65, 0x61,
|
||||
0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x24, 0x2e, 0x73, 0x68, 0x75, 0x74, 0x64, 0x6f,
|
||||
0x77, 0x6e, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x32, 0x52, 0x65, 0x61, 0x64,
|
||||
0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e,
|
||||
0x73, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
|
||||
0x56, 0x32, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70,
|
||||
0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x5e, 0x0a, 0x0b, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f,
|
||||
0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x25, 0x2e, 0x73, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e,
|
||||
0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x32, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f,
|
||||
0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x73,
|
||||
0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56,
|
||||
0x32, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70,
|
||||
0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x12, 0x59, 0x0a, 0x0a, 0x48, 0x65, 0x61, 0x64, 0x4f, 0x62,
|
||||
0x6a, 0x65, 0x63, 0x74, 0x12, 0x24, 0x2e, 0x73, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x2e,
|
||||
0x3e, 0x0a, 0x14, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x32, 0x49, 0x6e, 0x69, 0x74,
|
||||
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x65, 0x61, 0x74,
|
||||
0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x65, 0x61, 0x74,
|
||||
0x65, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x22,
|
||||
0x17, 0x0a, 0x15, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x32, 0x49, 0x6e, 0x69, 0x74,
|
||||
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x49, 0x0a, 0x1a, 0x53, 0x74, 0x6f, 0x72,
|
||||
0x61, 0x67, 0x65, 0x56, 0x32, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52,
|
||||
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x73, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x2e,
|
||||
0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x32, 0x50, 0x61, 0x74, 0x68, 0x52, 0x04, 0x70,
|
||||
0x61, 0x74, 0x68, 0x22, 0x33, 0x0a, 0x1b, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x32,
|
||||
0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
|
||||
0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28,
|
||||
0x0c, 0x52, 0x05, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x22, 0x60, 0x0a, 0x1b, 0x53, 0x74, 0x6f, 0x72,
|
||||
0x61, 0x67, 0x65, 0x56, 0x32, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
|
||||
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x73, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e,
|
||||
0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x32, 0x50, 0x61, 0x74, 0x68, 0x52, 0x04,
|
||||
0x70, 0x61, 0x74, 0x68, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x18, 0x02, 0x20,
|
||||
0x01, 0x28, 0x0c, 0x52, 0x05, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x22, 0x50, 0x0a, 0x1c, 0x53, 0x74,
|
||||
0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x32, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65,
|
||||
0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79,
|
||||
0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73,
|
||||
0x12, 0x1a, 0x0a, 0x08, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01,
|
||||
0x28, 0x09, 0x52, 0x08, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x22, 0x49, 0x0a, 0x1a,
|
||||
0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x32, 0x48, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a,
|
||||
0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x73, 0x68, 0x75,
|
||||
0x74, 0x64, 0x6f, 0x77, 0x6e, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x32, 0x48,
|
||||
0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
|
||||
0x65, 0x12, 0x5f, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63,
|
||||
0x74, 0x12, 0x26, 0x2e, 0x73, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x2e, 0x53, 0x74, 0x6f,
|
||||
0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x04, 0x70, 0x61,
|
||||
0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x73, 0x68, 0x75, 0x74, 0x64,
|
||||
0x6f, 0x77, 0x6e, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x32, 0x50, 0x61, 0x74,
|
||||
0x68, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x50, 0x0a, 0x1b, 0x53, 0x74, 0x6f, 0x72, 0x61,
|
||||
0x67, 0x65, 0x56, 0x32, 0x48, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65,
|
||||
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x73, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x2e,
|
||||
0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x32, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49,
|
||||
0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0x4b, 0x0a, 0x1c, 0x53, 0x74, 0x6f,
|
||||
0x72, 0x61, 0x67, 0x65, 0x56, 0x32, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65,
|
||||
0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x73, 0x68, 0x75, 0x74,
|
||||
0x64, 0x6f, 0x77, 0x6e, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x32, 0x44, 0x65,
|
||||
0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
|
||||
0x73, 0x65, 0x12, 0x5e, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
|
||||
0x73, 0x12, 0x25, 0x2e, 0x73, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x2e, 0x53, 0x74, 0x6f,
|
||||
0x72, 0x61, 0x67, 0x65, 0x56, 0x32, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
|
||||
0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x73, 0x68, 0x75, 0x74, 0x64,
|
||||
0x6f, 0x77, 0x6e, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x32, 0x4c, 0x69, 0x73,
|
||||
0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
|
||||
0x30, 0x01, 0x42, 0x46, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
|
||||
0x2f, 0x61, 0x72, 0x61, 0x6e, 0x67, 0x6f, 0x64, 0x62, 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x2d, 0x61,
|
||||
0x72, 0x61, 0x6e, 0x67, 0x6f, 0x64, 0x62, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x72, 0x61, 0x74,
|
||||
0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2f, 0x76, 0x32, 0x2f,
|
||||
0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x33,
|
||||
0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x04, 0x70, 0x61, 0x74,
|
||||
0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x73, 0x68, 0x75, 0x74, 0x64, 0x6f,
|
||||
0x77, 0x6e, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x32, 0x50, 0x61, 0x74, 0x68,
|
||||
0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x1f, 0x0a, 0x1d, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67,
|
||||
0x65, 0x56, 0x32, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52,
|
||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4a, 0x0a, 0x1b, 0x53, 0x74, 0x6f, 0x72, 0x61,
|
||||
0x67, 0x65, 0x56, 0x32, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52,
|
||||
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x73, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x2e,
|
||||
0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x32, 0x50, 0x61, 0x74, 0x68, 0x52, 0x04, 0x70,
|
||||
0x61, 0x74, 0x68, 0x22, 0x4f, 0x0a, 0x1c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x32,
|
||||
0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
|
||||
0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x05, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03,
|
||||
0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x2e, 0x53, 0x74,
|
||||
0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x32, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x05, 0x66,
|
||||
0x69, 0x6c, 0x65, 0x73, 0x32, 0xad, 0x04, 0x0a, 0x09, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
|
||||
0x56, 0x32, 0x12, 0x47, 0x0a, 0x04, 0x49, 0x6e, 0x69, 0x74, 0x12, 0x1e, 0x2e, 0x73, 0x68, 0x75,
|
||||
0x74, 0x64, 0x6f, 0x77, 0x6e, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x32, 0x49,
|
||||
0x6e, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x73, 0x68, 0x75,
|
||||
0x74, 0x64, 0x6f, 0x77, 0x6e, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x32, 0x49,
|
||||
0x6e, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5b, 0x0a, 0x0a, 0x52,
|
||||
0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x24, 0x2e, 0x73, 0x68, 0x75, 0x74,
|
||||
0x64, 0x6f, 0x77, 0x6e, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x32, 0x52, 0x65,
|
||||
0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
|
||||
0x25, 0x2e, 0x73, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61,
|
||||
0x67, 0x65, 0x56, 0x32, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65,
|
||||
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x5e, 0x0a, 0x0b, 0x57, 0x72, 0x69, 0x74,
|
||||
0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x25, 0x2e, 0x73, 0x68, 0x75, 0x74, 0x64, 0x6f,
|
||||
0x77, 0x6e, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x32, 0x57, 0x72, 0x69, 0x74,
|
||||
0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26,
|
||||
0x2e, 0x73, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67,
|
||||
0x65, 0x56, 0x32, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65,
|
||||
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x12, 0x59, 0x0a, 0x0a, 0x48, 0x65, 0x61, 0x64,
|
||||
0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x24, 0x2e, 0x73, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77,
|
||||
0x6e, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x32, 0x48, 0x65, 0x61, 0x64, 0x4f,
|
||||
0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x73,
|
||||
0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56,
|
||||
0x32, 0x48, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f,
|
||||
0x6e, 0x73, 0x65, 0x12, 0x5f, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a,
|
||||
0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x73, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x2e, 0x53,
|
||||
0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x32, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62,
|
||||
0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x73, 0x68,
|
||||
0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x32,
|
||||
0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70,
|
||||
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5e, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65,
|
||||
0x63, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x73, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x2e, 0x53,
|
||||
0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x32, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65,
|
||||
0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x73, 0x68, 0x75,
|
||||
0x74, 0x64, 0x6f, 0x77, 0x6e, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x32, 0x4c,
|
||||
0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
|
||||
0x73, 0x65, 0x30, 0x01, 0x42, 0x46, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
|
||||
0x6f, 0x6d, 0x2f, 0x61, 0x72, 0x61, 0x6e, 0x67, 0x6f, 0x64, 0x62, 0x2f, 0x6b, 0x75, 0x62, 0x65,
|
||||
0x2d, 0x61, 0x72, 0x61, 0x6e, 0x67, 0x6f, 0x64, 0x62, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x72,
|
||||
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2f, 0x76,
|
||||
0x32, 0x2f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72,
|
||||
0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
|
@ -825,27 +923,29 @@ func file_integrations_storage_v2_definition_storage_proto_rawDescGZIP() []byte
|
|||
return file_integrations_storage_v2_definition_storage_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_integrations_storage_v2_definition_storage_proto_msgTypes = make([]protoimpl.MessageInfo, 13)
|
||||
var file_integrations_storage_v2_definition_storage_proto_msgTypes = make([]protoimpl.MessageInfo, 15)
|
||||
var file_integrations_storage_v2_definition_storage_proto_goTypes = []interface{}{
|
||||
(*StorageV2Path)(nil), // 0: shutdown.StorageV2Path
|
||||
(*StorageV2Object)(nil), // 1: shutdown.StorageV2Object
|
||||
(*StorageV2ObjectInfo)(nil), // 2: shutdown.StorageV2ObjectInfo
|
||||
(*StorageV2ReadObjectRequest)(nil), // 3: shutdown.StorageV2ReadObjectRequest
|
||||
(*StorageV2ReadObjectResponse)(nil), // 4: shutdown.StorageV2ReadObjectResponse
|
||||
(*StorageV2WriteObjectRequest)(nil), // 5: shutdown.StorageV2WriteObjectRequest
|
||||
(*StorageV2WriteObjectResponse)(nil), // 6: shutdown.StorageV2WriteObjectResponse
|
||||
(*StorageV2HeadObjectRequest)(nil), // 7: shutdown.StorageV2HeadObjectRequest
|
||||
(*StorageV2HeadObjectResponse)(nil), // 8: shutdown.StorageV2HeadObjectResponse
|
||||
(*StorageV2DeleteObjectRequest)(nil), // 9: shutdown.StorageV2DeleteObjectRequest
|
||||
(*StorageV2DeleteObjectResponse)(nil), // 10: shutdown.StorageV2DeleteObjectResponse
|
||||
(*StorageV2ListObjectsRequest)(nil), // 11: shutdown.StorageV2ListObjectsRequest
|
||||
(*StorageV2ListObjectsResponse)(nil), // 12: shutdown.StorageV2ListObjectsResponse
|
||||
(*timestamppb.Timestamp)(nil), // 13: google.protobuf.Timestamp
|
||||
(*StorageV2InitRequest)(nil), // 3: shutdown.StorageV2InitRequest
|
||||
(*StorageV2InitResponse)(nil), // 4: shutdown.StorageV2InitResponse
|
||||
(*StorageV2ReadObjectRequest)(nil), // 5: shutdown.StorageV2ReadObjectRequest
|
||||
(*StorageV2ReadObjectResponse)(nil), // 6: shutdown.StorageV2ReadObjectResponse
|
||||
(*StorageV2WriteObjectRequest)(nil), // 7: shutdown.StorageV2WriteObjectRequest
|
||||
(*StorageV2WriteObjectResponse)(nil), // 8: shutdown.StorageV2WriteObjectResponse
|
||||
(*StorageV2HeadObjectRequest)(nil), // 9: shutdown.StorageV2HeadObjectRequest
|
||||
(*StorageV2HeadObjectResponse)(nil), // 10: shutdown.StorageV2HeadObjectResponse
|
||||
(*StorageV2DeleteObjectRequest)(nil), // 11: shutdown.StorageV2DeleteObjectRequest
|
||||
(*StorageV2DeleteObjectResponse)(nil), // 12: shutdown.StorageV2DeleteObjectResponse
|
||||
(*StorageV2ListObjectsRequest)(nil), // 13: shutdown.StorageV2ListObjectsRequest
|
||||
(*StorageV2ListObjectsResponse)(nil), // 14: shutdown.StorageV2ListObjectsResponse
|
||||
(*timestamppb.Timestamp)(nil), // 15: google.protobuf.Timestamp
|
||||
}
|
||||
var file_integrations_storage_v2_definition_storage_proto_depIdxs = []int32{
|
||||
0, // 0: shutdown.StorageV2Object.path:type_name -> shutdown.StorageV2Path
|
||||
2, // 1: shutdown.StorageV2Object.info:type_name -> shutdown.StorageV2ObjectInfo
|
||||
13, // 2: shutdown.StorageV2ObjectInfo.last_updated:type_name -> google.protobuf.Timestamp
|
||||
15, // 2: shutdown.StorageV2ObjectInfo.last_updated:type_name -> google.protobuf.Timestamp
|
||||
0, // 3: shutdown.StorageV2ReadObjectRequest.path:type_name -> shutdown.StorageV2Path
|
||||
0, // 4: shutdown.StorageV2WriteObjectRequest.path:type_name -> shutdown.StorageV2Path
|
||||
0, // 5: shutdown.StorageV2HeadObjectRequest.path:type_name -> shutdown.StorageV2Path
|
||||
|
@ -853,18 +953,20 @@ var file_integrations_storage_v2_definition_storage_proto_depIdxs = []int32{
|
|||
0, // 7: shutdown.StorageV2DeleteObjectRequest.path:type_name -> shutdown.StorageV2Path
|
||||
0, // 8: shutdown.StorageV2ListObjectsRequest.path:type_name -> shutdown.StorageV2Path
|
||||
1, // 9: shutdown.StorageV2ListObjectsResponse.files:type_name -> shutdown.StorageV2Object
|
||||
3, // 10: shutdown.StorageV2.ReadObject:input_type -> shutdown.StorageV2ReadObjectRequest
|
||||
5, // 11: shutdown.StorageV2.WriteObject:input_type -> shutdown.StorageV2WriteObjectRequest
|
||||
7, // 12: shutdown.StorageV2.HeadObject:input_type -> shutdown.StorageV2HeadObjectRequest
|
||||
9, // 13: shutdown.StorageV2.DeleteObject:input_type -> shutdown.StorageV2DeleteObjectRequest
|
||||
11, // 14: shutdown.StorageV2.ListObjects:input_type -> shutdown.StorageV2ListObjectsRequest
|
||||
4, // 15: shutdown.StorageV2.ReadObject:output_type -> shutdown.StorageV2ReadObjectResponse
|
||||
6, // 16: shutdown.StorageV2.WriteObject:output_type -> shutdown.StorageV2WriteObjectResponse
|
||||
8, // 17: shutdown.StorageV2.HeadObject:output_type -> shutdown.StorageV2HeadObjectResponse
|
||||
10, // 18: shutdown.StorageV2.DeleteObject:output_type -> shutdown.StorageV2DeleteObjectResponse
|
||||
12, // 19: shutdown.StorageV2.ListObjects:output_type -> shutdown.StorageV2ListObjectsResponse
|
||||
15, // [15:20] is the sub-list for method output_type
|
||||
10, // [10:15] is the sub-list for method input_type
|
||||
3, // 10: shutdown.StorageV2.Init:input_type -> shutdown.StorageV2InitRequest
|
||||
5, // 11: shutdown.StorageV2.ReadObject:input_type -> shutdown.StorageV2ReadObjectRequest
|
||||
7, // 12: shutdown.StorageV2.WriteObject:input_type -> shutdown.StorageV2WriteObjectRequest
|
||||
9, // 13: shutdown.StorageV2.HeadObject:input_type -> shutdown.StorageV2HeadObjectRequest
|
||||
11, // 14: shutdown.StorageV2.DeleteObject:input_type -> shutdown.StorageV2DeleteObjectRequest
|
||||
13, // 15: shutdown.StorageV2.ListObjects:input_type -> shutdown.StorageV2ListObjectsRequest
|
||||
4, // 16: shutdown.StorageV2.Init:output_type -> shutdown.StorageV2InitResponse
|
||||
6, // 17: shutdown.StorageV2.ReadObject:output_type -> shutdown.StorageV2ReadObjectResponse
|
||||
8, // 18: shutdown.StorageV2.WriteObject:output_type -> shutdown.StorageV2WriteObjectResponse
|
||||
10, // 19: shutdown.StorageV2.HeadObject:output_type -> shutdown.StorageV2HeadObjectResponse
|
||||
12, // 20: shutdown.StorageV2.DeleteObject:output_type -> shutdown.StorageV2DeleteObjectResponse
|
||||
14, // 21: shutdown.StorageV2.ListObjects:output_type -> shutdown.StorageV2ListObjectsResponse
|
||||
16, // [16:22] is the sub-list for method output_type
|
||||
10, // [10:16] is the sub-list for method input_type
|
||||
10, // [10:10] is the sub-list for extension type_name
|
||||
10, // [10:10] is the sub-list for extension extendee
|
||||
0, // [0:10] is the sub-list for field type_name
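Note on the renumbering above: the regenerated goTypes slice gains two entries (StorageV2InitRequest and StorageV2InitResponse at positions 3 and 4), so every later message shifts by two and the google.protobuf.Timestamp entry moves from index 13 to 15. The depIdxs sub-lists grow accordingly: method inputs go from [10:15] to [10:16] and outputs from [15:20] to [16:22], covering the new Init RPC. All of this is protoc-gen-go bookkeeping; nothing here is hand-written.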
|
||||
|
@ -913,7 +1015,7 @@ func file_integrations_storage_v2_definition_storage_proto_init() {
|
|||
}
|
||||
}
|
||||
file_integrations_storage_v2_definition_storage_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*StorageV2ReadObjectRequest); i {
|
||||
switch v := v.(*StorageV2InitRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
|
@ -925,7 +1027,7 @@ func file_integrations_storage_v2_definition_storage_proto_init() {
|
|||
}
|
||||
}
|
||||
file_integrations_storage_v2_definition_storage_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*StorageV2ReadObjectResponse); i {
|
||||
switch v := v.(*StorageV2InitResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
|
@ -937,7 +1039,7 @@ func file_integrations_storage_v2_definition_storage_proto_init() {
|
|||
}
|
||||
}
|
||||
file_integrations_storage_v2_definition_storage_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*StorageV2WriteObjectRequest); i {
|
||||
switch v := v.(*StorageV2ReadObjectRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
|
@ -949,7 +1051,7 @@ func file_integrations_storage_v2_definition_storage_proto_init() {
|
|||
}
|
||||
}
|
||||
file_integrations_storage_v2_definition_storage_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*StorageV2WriteObjectResponse); i {
|
||||
switch v := v.(*StorageV2ReadObjectResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
|
@ -961,7 +1063,7 @@ func file_integrations_storage_v2_definition_storage_proto_init() {
|
|||
}
|
||||
}
|
||||
file_integrations_storage_v2_definition_storage_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*StorageV2HeadObjectRequest); i {
|
||||
switch v := v.(*StorageV2WriteObjectRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
|
@ -973,7 +1075,7 @@ func file_integrations_storage_v2_definition_storage_proto_init() {
|
|||
}
|
||||
}
|
||||
file_integrations_storage_v2_definition_storage_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*StorageV2HeadObjectResponse); i {
|
||||
switch v := v.(*StorageV2WriteObjectResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
|
@ -985,7 +1087,7 @@ func file_integrations_storage_v2_definition_storage_proto_init() {
|
|||
}
|
||||
}
|
||||
file_integrations_storage_v2_definition_storage_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*StorageV2DeleteObjectRequest); i {
|
||||
switch v := v.(*StorageV2HeadObjectRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
|
@ -997,7 +1099,7 @@ func file_integrations_storage_v2_definition_storage_proto_init() {
|
|||
}
|
||||
}
|
||||
file_integrations_storage_v2_definition_storage_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*StorageV2DeleteObjectResponse); i {
|
||||
switch v := v.(*StorageV2HeadObjectResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
|
@ -1009,7 +1111,7 @@ func file_integrations_storage_v2_definition_storage_proto_init() {
|
|||
}
|
||||
}
|
||||
file_integrations_storage_v2_definition_storage_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*StorageV2ListObjectsRequest); i {
|
||||
switch v := v.(*StorageV2DeleteObjectRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
|
@ -1021,6 +1123,30 @@ func file_integrations_storage_v2_definition_storage_proto_init() {
|
|||
}
|
||||
}
|
||||
file_integrations_storage_v2_definition_storage_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*StorageV2DeleteObjectResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_integrations_storage_v2_definition_storage_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*StorageV2ListObjectsRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_integrations_storage_v2_definition_storage_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*StorageV2ListObjectsResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
|
@ -1033,13 +1159,14 @@ func file_integrations_storage_v2_definition_storage_proto_init() {
|
|||
}
|
||||
}
|
||||
}
|
||||
file_integrations_storage_v2_definition_storage_proto_msgTypes[3].OneofWrappers = []interface{}{}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_integrations_storage_v2_definition_storage_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 13,
|
||||
NumMessages: 15,
|
||||
NumExtensions: 0,
|
||||
NumServices: 1,
|
||||
},
|
||||
|
|
|
@ -28,6 +28,9 @@ option go_package = "github.com/arangodb/kube-arangodb/integrations/storage/v2/d
|
|||
|
||||
// Defines StorageV2 Service
|
||||
service StorageV2 {
|
||||
// Allows to init the Storage settings (like bucket creation)
|
||||
rpc Init(StorageV2InitRequest) returns (StorageV2InitResponse);
|
||||
|
||||
// Allows to Read Objects using stream
|
||||
rpc ReadObject(StorageV2ReadObjectRequest) returns (stream StorageV2ReadObjectResponse);
|
||||
|
||||
|
@ -66,6 +69,16 @@ message StorageV2ObjectInfo {
|
|||
google.protobuf.Timestamp last_updated = 2;
|
||||
}
|
||||
|
||||
// StorageV2 Init Request
|
||||
message StorageV2InitRequest {
|
||||
// Defines if required resources should be created
|
||||
optional bool create = 1;
|
||||
}
|
||||
|
||||
// StorageV2 Init Response
|
||||
message StorageV2InitResponse {
|
||||
}
|
||||
|
||||
// StorageV2 ReadObject Request
|
||||
message StorageV2ReadObjectRequest {
|
||||
// Defines Object Path/Key
|
||||
|
|
|
@ -22,6 +22,8 @@ const _ = grpc.SupportPackageIsVersion7
|
|||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
|
||||
type StorageV2Client interface {
|
||||
// Allows to init the Storage settings (like bucket creation)
|
||||
Init(ctx context.Context, in *StorageV2InitRequest, opts ...grpc.CallOption) (*StorageV2InitResponse, error)
|
||||
// Allows to Read Objects using stream
|
||||
ReadObject(ctx context.Context, in *StorageV2ReadObjectRequest, opts ...grpc.CallOption) (StorageV2_ReadObjectClient, error)
|
||||
// Allows to Write Objects using stream
|
||||
|
@ -42,6 +44,15 @@ func NewStorageV2Client(cc grpc.ClientConnInterface) StorageV2Client {
|
|||
return &storageV2Client{cc}
|
||||
}
|
||||
|
||||
func (c *storageV2Client) Init(ctx context.Context, in *StorageV2InitRequest, opts ...grpc.CallOption) (*StorageV2InitResponse, error) {
|
||||
out := new(StorageV2InitResponse)
|
||||
err := c.cc.Invoke(ctx, "/shutdown.StorageV2/Init", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
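As a quick orientation for callers of the regenerated client, the new unary method is used like any other gRPC call. A minimal sketch only; the endpoint, credentials and the use of proto.Bool are illustrative assumptions, not part of this change:

package main

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/protobuf/proto"

	pbStorageV2 "github.com/arangodb/kube-arangodb/integrations/storage/v2/definition"
)

func main() {
	// The address is illustrative; the integration sidecar decides the real endpoint.
	conn, err := grpc.Dial("localhost:9201", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	client := pbStorageV2.NewStorageV2Client(conn)

	// create=true asks the backend to provision missing resources
	// (for the S3 backend: create the bucket if it does not exist yet).
	if _, err := client.Init(context.Background(), &pbStorageV2.StorageV2InitRequest{Create: proto.Bool(true)}); err != nil {
		panic(err)
	}
}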
|
||||
|
||||
func (c *storageV2Client) ReadObject(ctx context.Context, in *StorageV2ReadObjectRequest, opts ...grpc.CallOption) (StorageV2_ReadObjectClient, error) {
|
||||
stream, err := c.cc.NewStream(ctx, &StorageV2_ServiceDesc.Streams[0], "/shutdown.StorageV2/ReadObject", opts...)
|
||||
if err != nil {
|
||||
|
@ -162,6 +173,8 @@ func (x *storageV2ListObjectsClient) Recv() (*StorageV2ListObjectsResponse, erro
|
|||
// All implementations must embed UnimplementedStorageV2Server
|
||||
// for forward compatibility
|
||||
type StorageV2Server interface {
|
||||
// Allows to init the Storage settings (like bucket creation)
|
||||
Init(context.Context, *StorageV2InitRequest) (*StorageV2InitResponse, error)
|
||||
// Allows to Read Objects using stream
|
||||
ReadObject(*StorageV2ReadObjectRequest, StorageV2_ReadObjectServer) error
|
||||
// Allows to Write Objects using stream
|
||||
|
@ -179,6 +192,9 @@ type StorageV2Server interface {
|
|||
type UnimplementedStorageV2Server struct {
|
||||
}
|
||||
|
||||
func (UnimplementedStorageV2Server) Init(context.Context, *StorageV2InitRequest) (*StorageV2InitResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method Init not implemented")
|
||||
}
|
||||
func (UnimplementedStorageV2Server) ReadObject(*StorageV2ReadObjectRequest, StorageV2_ReadObjectServer) error {
|
||||
return status.Errorf(codes.Unimplemented, "method ReadObject not implemented")
|
||||
}
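On the server side, an implementation only needs to add one unary method to satisfy the extended interface; a minimal sketch with an illustrative type name and placeholder body (assumes the usual context and grpc imports):

type storageServer struct {
	UnimplementedStorageV2Server // keeps forward compatibility for future RPCs
}

func (s *storageServer) Init(ctx context.Context, in *StorageV2InitRequest) (*StorageV2InitResponse, error) {
	if in.GetCreate() {
		// provision backing resources here (e.g. ensure the bucket exists)
	}
	return &StorageV2InitResponse{}, nil
}

// Registration is unchanged:
//   grpcServer := grpc.NewServer()
//   RegisterStorageV2Server(grpcServer, &storageServer{})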
|
||||
|
@ -207,6 +223,24 @@ func RegisterStorageV2Server(s grpc.ServiceRegistrar, srv StorageV2Server) {
|
|||
s.RegisterService(&StorageV2_ServiceDesc, srv)
|
||||
}
|
||||
|
||||
func _StorageV2_Init_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(StorageV2InitRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(StorageV2Server).Init(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/shutdown.StorageV2/Init",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(StorageV2Server).Init(ctx, req.(*StorageV2InitRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _StorageV2_ReadObject_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
m := new(StorageV2ReadObjectRequest)
|
||||
if err := stream.RecvMsg(m); err != nil {
|
||||
|
@ -318,6 +352,10 @@ var StorageV2_ServiceDesc = grpc.ServiceDesc{
|
|||
ServiceName: "shutdown.StorageV2",
|
||||
HandlerType: (*StorageV2Server)(nil),
|
||||
Methods: []grpc.MethodDesc{
|
||||
{
|
||||
MethodName: "Init",
|
||||
Handler: _StorageV2_Init_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "HeadObject",
|
||||
Handler: _StorageV2_HeadObject_Handler,
|
||||
|
|
integrations/storage/v2/shared/init.go (new file, 33 lines)
|
@ -0,0 +1,33 @@
|
|||
//
|
||||
// DISCLAIMER
|
||||
//
|
||||
// Copyright 2024 ArangoDB GmbH, Cologne, Germany
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// Copyright holder is ArangoDB GmbH, Cologne, Germany
|
||||
//
|
||||
|
||||
package shared
|
||||
|
||||
type InitOptions struct {
|
||||
Create *bool
|
||||
}
|
||||
|
||||
func (i *InitOptions) GetCreate() bool {
|
||||
if i == nil || i.Create == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return *i.Create
|
||||
}
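GetCreate is nil-safe on both the receiver and the field, so callers can hand options straight through without guarding. A small illustration (util.NewType is the repository's generic pointer helper; its use here is only for the example):

func exampleGetCreate() {
	var opts *InitOptions
	_ = opts.GetCreate() // false: nil receiver and nil field both mean "do not create"

	opts = &InitOptions{Create: util.NewType(true)}
	_ = opts.GetCreate() // true
}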
|
|
@ -55,6 +55,7 @@ type Info struct {
|
|||
}
|
||||
|
||||
type IO interface {
|
||||
Init(ctx context.Context, opts *InitOptions) error
|
||||
Write(ctx context.Context, key string) (Writer, error)
|
||||
Read(ctx context.Context, key string) (Reader, error)
|
||||
Head(ctx context.Context, key string) (*Info, error)
|
||||
|
|
integrations/storage/v2/shared/s3/init.go (new file, 50 lines)
|
@ -0,0 +1,50 @@
|
|||
//
|
||||
// DISCLAIMER
|
||||
//
|
||||
// Copyright 2024 ArangoDB GmbH, Cologne, Germany
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// Copyright holder is ArangoDB GmbH, Cologne, Germany
|
||||
//
|
||||
|
||||
package s3
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
|
||||
pbImplStorageV2Shared "github.com/arangodb/kube-arangodb/integrations/storage/v2/shared"
|
||||
"github.com/arangodb/kube-arangodb/pkg/util"
|
||||
)
|
||||
|
||||
func (i *ios) Init(ctx context.Context, opts *pbImplStorageV2Shared.InitOptions) error {
|
||||
if opts.GetCreate() {
|
||||
if _, err := i.client.HeadBucketWithContext(ctx, &s3.HeadBucketInput{
|
||||
Bucket: util.NewType(i.config.BucketName),
|
||||
}); err != nil {
|
||||
if !IsAWSNotFoundError(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := i.client.CreateBucketWithContext(ctx, &s3.CreateBucketInput{
|
||||
Bucket: util.NewType(i.config.BucketName),
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
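The flow is head-then-create: HeadBucket probes for the bucket, and CreateBucket runs only when the probe fails with a not-found style error; any other HeadBucket failure (for example missing permissions) is returned as-is, so Init fails loudly instead of blindly attempting creation. IsAWSNotFoundError is defined elsewhere in this package; as a hedged sketch of the kind of check it performs (not necessarily the actual implementation), using the standard errors package and aws-sdk-go's awserr:

func isNotFound(err error) bool {
	var ae awserr.Error
	if errors.As(err, &ae) {
		switch ae.Code() {
		case s3.ErrCodeNoSuchBucket, "NotFound": // HeadBucket reports a plain 404 as "NotFound"
			return true
		}
	}
	return false
}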
|
|
@ -277,3 +277,13 @@ func (i *implementation) ListObjects(req *pbStorageV2.StorageV2ListObjectsReques
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (i *implementation) Init(ctx context.Context, in *pbStorageV2.StorageV2InitRequest) (*pbStorageV2.StorageV2InitResponse, error) {
|
||||
if err := i.io.Init(ctx, &pbImplStorageV2Shared.InitOptions{
|
||||
Create: util.NewPointer(in.Create),
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &pbStorageV2.StorageV2InitResponse{}, nil
|
||||
}
|
||||
|
|
|
@ -146,32 +146,7 @@ func testFileListing(t *testing.T, ctx context.Context, h pbStorageV2.StorageV2C
|
|||
|
||||
t.Run("UploadAll", func(t *testing.T) {
|
||||
util.ParallelProcess(func(in string) {
|
||||
wr, err := h.WriteObject(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
buff := make([]byte, 1024)
|
||||
|
||||
cf := bytes.NewReader(data)
|
||||
|
||||
for {
|
||||
n, err := cf.Read(buff)
|
||||
if err != nil {
|
||||
if errors.Is(err, io.EOF) {
|
||||
break
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
require.NoError(t, wr.Send(&pbStorageV2.StorageV2WriteObjectRequest{
|
||||
Path: &pbStorageV2.StorageV2Path{
|
||||
Path: in,
|
||||
},
|
||||
Chunk: buff[:n],
|
||||
}))
|
||||
}
|
||||
|
||||
ds, err := wr.CloseAndRecv()
|
||||
ds, err := pbStorageV2.Send(ctx, h, in, bytes.NewReader(data))
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NotNil(t, ds)
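The tests now route uploads through a Send helper in the generated definition package instead of hand-rolling the WriteObject stream (downloads use a matching Receive helper further down). A sketch of what such a helper amounts to, mirroring the loop that was removed; the exact signature and the 1 KiB chunk size are assumptions, and it relies on context, errors, io and the generated types:

func Send(ctx context.Context, client StorageV2Client, path string, r io.Reader) (*StorageV2WriteObjectResponse, error) {
	wr, err := client.WriteObject(ctx)
	if err != nil {
		return nil, err
	}

	buff := make([]byte, 1024)
	for {
		n, err := r.Read(buff)
		if n > 0 {
			if sendErr := wr.Send(&StorageV2WriteObjectRequest{
				Path:  &StorageV2Path{Path: path},
				Chunk: buff[:n],
			}); sendErr != nil {
				return nil, sendErr
			}
		}
		if errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			return nil, err
		}
	}

	return wr.CloseAndRecv() // response carries bytes written and checksum
}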
|
||||
|
@ -268,32 +243,7 @@ func testS3BucketFileHandling(t *testing.T, ctx context.Context, h pbStorageV2.S
|
|||
})
|
||||
|
||||
t.Run("Send Object", func(t *testing.T) {
|
||||
wr, err := h.WriteObject(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
buff := make([]byte, 1024)
|
||||
|
||||
cf := bytes.NewReader(dataOne)
|
||||
|
||||
for {
|
||||
n, err := cf.Read(buff)
|
||||
if err != nil {
|
||||
if errors.Is(err, io.EOF) {
|
||||
break
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
require.NoError(t, wr.Send(&pbStorageV2.StorageV2WriteObjectRequest{
|
||||
Path: &pbStorageV2.StorageV2Path{
|
||||
Path: name,
|
||||
},
|
||||
Chunk: buff[:n],
|
||||
}))
|
||||
}
|
||||
|
||||
ds, err := wr.CloseAndRecv()
|
||||
ds, err := pbStorageV2.Send(ctx, h, name, bytes.NewReader(dataOne))
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NotNil(t, ds)
|
||||
|
@ -313,23 +263,10 @@ func testS3BucketFileHandling(t *testing.T, ctx context.Context, h pbStorageV2.S
|
|||
})
|
||||
|
||||
t.Run("Download Object", func(t *testing.T) {
|
||||
wr, err := h.ReadObject(ctx, &pbStorageV2.StorageV2ReadObjectRequest{
|
||||
Path: &pbStorageV2.StorageV2Path{Path: name},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
data := bytes.NewBuffer(nil)
|
||||
|
||||
for {
|
||||
resp, err := wr.Recv()
|
||||
if errors.Is(err, io.EOF) {
|
||||
break
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = util.WriteAll(data, resp.GetChunk())
|
||||
require.NoError(t, err)
|
||||
}
|
||||
n, err := pbStorageV2.Receive(ctx, h, name, data)
|
||||
require.NoError(t, err)
|
||||
require.EqualValues(t, n, size)
|
||||
|
||||
pdata := data.Bytes()
|
||||
|
||||
|
@ -340,32 +277,7 @@ func testS3BucketFileHandling(t *testing.T, ctx context.Context, h pbStorageV2.S
|
|||
})
|
||||
|
||||
t.Run("Re-Send Object", func(t *testing.T) {
|
||||
wr, err := h.WriteObject(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
buff := make([]byte, 1024)
|
||||
|
||||
cf := bytes.NewReader(dataTwo)
|
||||
|
||||
for {
|
||||
n, err := cf.Read(buff)
|
||||
if err != nil {
|
||||
if errors.Is(err, io.EOF) {
|
||||
break
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
require.NoError(t, wr.Send(&pbStorageV2.StorageV2WriteObjectRequest{
|
||||
Path: &pbStorageV2.StorageV2Path{
|
||||
Path: name,
|
||||
},
|
||||
Chunk: buff[:n],
|
||||
}))
|
||||
}
|
||||
|
||||
ds, err := wr.CloseAndRecv()
|
||||
ds, err := pbStorageV2.Send(ctx, h, name, bytes.NewReader(dataTwo))
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NotNil(t, ds)
|
||||
|
@ -380,32 +292,7 @@ func testS3BucketFileHandling(t *testing.T, ctx context.Context, h pbStorageV2.S
|
|||
})
|
||||
|
||||
t.Run("Send Second Object", func(t *testing.T) {
|
||||
wr, err := h.WriteObject(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
buff := make([]byte, 1024)
|
||||
|
||||
cf := bytes.NewReader(dataOne)
|
||||
|
||||
for {
|
||||
n, err := cf.Read(buff)
|
||||
if err != nil {
|
||||
if errors.Is(err, io.EOF) {
|
||||
break
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
require.NoError(t, wr.Send(&pbStorageV2.StorageV2WriteObjectRequest{
|
||||
Path: &pbStorageV2.StorageV2Path{
|
||||
Path: nameTwo,
|
||||
},
|
||||
Chunk: buff[:n],
|
||||
}))
|
||||
}
|
||||
|
||||
ds, err := wr.CloseAndRecv()
|
||||
ds, err := pbStorageV2.Send(ctx, h, nameTwo, bytes.NewReader(dataOne))
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NotNil(t, ds)
|
||||
|
@ -414,23 +301,10 @@ func testS3BucketFileHandling(t *testing.T, ctx context.Context, h pbStorageV2.S
|
|||
})
|
||||
|
||||
t.Run("Re-Download Object", func(t *testing.T) {
|
||||
wr, err := h.ReadObject(ctx, &pbStorageV2.StorageV2ReadObjectRequest{
|
||||
Path: &pbStorageV2.StorageV2Path{Path: name},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
data := bytes.NewBuffer(nil)
|
||||
|
||||
for {
|
||||
resp, err := wr.Recv()
|
||||
if errors.Is(err, io.EOF) {
|
||||
break
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = util.WriteAll(data, resp.GetChunk())
|
||||
require.NoError(t, err)
|
||||
}
|
||||
n, err := pbStorageV2.Receive(ctx, h, name, data)
|
||||
require.NoError(t, err)
|
||||
require.EqualValues(t, n, size)
|
||||
|
||||
pdata := data.Bytes()
|
||||
|
||||
|
|
|
@ -277,6 +277,11 @@ func (in *ArangoRouteSpecDestinationTLS) DeepCopy() *ArangoRouteSpecDestinationT
|
|||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ArangoRouteSpecOptionUpgrade) DeepCopyInto(out *ArangoRouteSpecOptionUpgrade) {
|
||||
*out = *in
|
||||
if in.Enabled != nil {
|
||||
in, out := &in.Enabled, &out.Enabled
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -296,7 +301,9 @@ func (in *ArangoRouteSpecOptions) DeepCopyInto(out *ArangoRouteSpecOptions) {
|
|||
if in.Upgrade != nil {
|
||||
in, out := &in.Upgrade, &out.Upgrade
|
||||
*out = make(ArangoRouteSpecOptionsUpgrade, len(*in))
|
||||
copy(*out, *in)
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
@ -316,7 +323,9 @@ func (in ArangoRouteSpecOptionsUpgrade) DeepCopyInto(out *ArangoRouteSpecOptions
|
|||
{
|
||||
in := &in
|
||||
*out = make(ArangoRouteSpecOptionsUpgrade, len(*in))
|
||||
copy(*out, *in)
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
return
|
||||
}
|
||||
}
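The switch from copy to per-element DeepCopyInto matters because ArangoRouteSpecOptionUpgrade carries a pointer field (Enabled *bool): a plain copy leaves both slices pointing at the same bool, so mutating the original would silently change the copy. A small self-contained illustration of the difference (illustrative types only):

func illustrateSharedPointer() {
	type item struct{ Enabled *bool }

	src := []item{{Enabled: new(bool)}}

	shallow := make([]item, len(src))
	copy(shallow, src) // shallow[0].Enabled == src[0].Enabled: the *bool is shared

	deep := make([]item, len(src))
	for i := range src {
		b := *src[i].Enabled
		deep[i] = item{Enabled: &b} // what the per-element DeepCopyInto achieves
	}
	_ = shallow
	_ = deep
}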
|
||||
|
|
|
@ -23,6 +23,7 @@ package v1alpha1
|
|||
import api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
|
||||
|
||||
const (
|
||||
SpecValidCondition api.ConditionType = "SpecValid"
|
||||
ReadyCondition api.ConditionType = "Ready"
|
||||
DeploymentFoundCondition api.ConditionType = "DeploymentFound"
|
||||
SpecValidCondition api.ConditionType = "SpecValid"
|
||||
ReadyCondition api.ConditionType = "Ready"
|
||||
)
|
||||
|
|
|
@ -25,7 +25,7 @@ import (
|
|||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
|
||||
"github.com/arangodb/kube-arangodb/pkg/apis/networking"
|
||||
"github.com/arangodb/kube-arangodb/pkg/apis/platform"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -36,7 +36,7 @@ var (
|
|||
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
|
||||
AddToScheme = SchemeBuilder.AddToScheme
|
||||
|
||||
SchemeGroupVersion = schema.GroupVersion{Group: networking.ArangoNetworkingGroupName, Version: ArangoPlatformVersion}
|
||||
SchemeGroupVersion = schema.GroupVersion{Group: platform.ArangoPlatformGroupName, Version: ArangoPlatformVersion}
|
||||
)
|
||||
|
||||
// Resource gets an ArangoCluster GroupResource for a specified resource
|
||||
|
|
|
@ -23,7 +23,7 @@ package v1alpha1
|
|||
import (
|
||||
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/arangodb/kube-arangodb/pkg/apis/networking"
|
||||
"github.com/arangodb/kube-arangodb/pkg/apis/platform"
|
||||
)
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
@ -53,7 +53,7 @@ func (a *ArangoPlatformStorage) AsOwner() meta.OwnerReference {
|
|||
trueVar := true
|
||||
return meta.OwnerReference{
|
||||
APIVersion: SchemeGroupVersion.String(),
|
||||
Kind: networking.ArangoRouteResourceKind,
|
||||
Kind: platform.ArangoPlatformStorageResourceKind,
|
||||
Name: a.Name,
|
||||
UID: a.UID,
|
||||
Controller: &trueVar,
|
||||
|
|
|
@ -25,21 +25,10 @@ import (
|
|||
)
|
||||
|
||||
type ArangoPlatformStorageSpec struct {
|
||||
// Deployment specifies the ArangoDeployment object name
|
||||
Deployment *string `json:"deployment,omitempty"`
|
||||
|
||||
// Backend defines how storage is implemented
|
||||
Backend *ArangoPlatformStorageSpecBackend `json:"backend,omitempty"`
|
||||
}
|
||||
|
||||
func (s *ArangoPlatformStorageSpec) GetDeployment() string {
|
||||
if s == nil || s.Deployment == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
return *s.Deployment
|
||||
}
|
||||
|
||||
func (s *ArangoPlatformStorageSpec) GetBackend() *ArangoPlatformStorageSpecBackend {
|
||||
if s == nil || s.Backend == nil {
|
||||
return nil
|
||||
|
|
|
@ -22,14 +22,10 @@ package v1alpha1
|
|||
|
||||
import (
|
||||
api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
|
||||
sharedApi "github.com/arangodb/kube-arangodb/pkg/apis/shared/v1"
|
||||
)
|
||||
|
||||
type ArangoPlatformStorageStatus struct {
|
||||
// Conditions specific to the entire storage
|
||||
// +doc/type: api.Conditions
|
||||
Conditions api.ConditionList `json:"conditions,omitempty"`
|
||||
|
||||
// Deployment keeps the ArangoDeployment reference
|
||||
Deployment *sharedApi.Object `json:"deployment,omitempty"`
|
||||
}
|
||||
|
|
pkg/apis/platform/v1alpha1/zz_generated.deepcopy.go (generated, 10 lines changed)
|
@ -95,11 +95,6 @@ func (in *ArangoPlatformStorageList) DeepCopyObject() runtime.Object {
|
|||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ArangoPlatformStorageSpec) DeepCopyInto(out *ArangoPlatformStorageSpec) {
|
||||
*out = *in
|
||||
if in.Deployment != nil {
|
||||
in, out := &in.Deployment, &out.Deployment
|
||||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
if in.Backend != nil {
|
||||
in, out := &in.Backend, &out.Backend
|
||||
*out = new(ArangoPlatformStorageSpecBackend)
|
||||
|
@ -237,11 +232,6 @@ func (in *ArangoPlatformStorageStatus) DeepCopyInto(out *ArangoPlatformStorageSt
|
|||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.Deployment != nil {
|
||||
in, out := &in.Deployment, &out.Deployment
|
||||
*out = new(v1.Object)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
|
|
|
@ -68,9 +68,6 @@ v1alpha1:
|
|||
type: string
|
||||
type: object
|
||||
type: object
|
||||
deployment:
|
||||
description: Deployment specifies the ArangoDeployment object name
|
||||
type: string
|
||||
type: object
|
||||
status:
|
||||
description: Object with preserved fields for backward compatibility
|
||||
|
|
|
@ -47,6 +47,7 @@ var rootFactories = []shared.Factory{
|
|||
kubernetes.Backup(),
|
||||
kubernetes.Scheduler(),
|
||||
kubernetes.Networking(),
|
||||
kubernetes.Platform(),
|
||||
}
|
||||
|
||||
func InitCommand(cmd *cobra.Command) {
|
||||
|
|
pkg/debug_package/generators/kubernetes/arango_platform.go (new file, 47 lines)
|
@ -0,0 +1,47 @@
|
|||
//
|
||||
// DISCLAIMER
|
||||
//
|
||||
// Copyright 2024 ArangoDB GmbH, Cologne, Germany
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// Copyright holder is ArangoDB GmbH, Cologne, Germany
|
||||
//
|
||||
|
||||
package kubernetes
|
||||
|
||||
import (
|
||||
"github.com/rs/zerolog"
|
||||
|
||||
"github.com/arangodb/kube-arangodb/pkg/debug_package/shared"
|
||||
"github.com/arangodb/kube-arangodb/pkg/util/errors"
|
||||
"github.com/arangodb/kube-arangodb/pkg/util/kclient"
|
||||
)
|
||||
|
||||
func Platform() shared.Factory {
|
||||
return shared.NewFactory("platform", true, platform)
|
||||
}
|
||||
|
||||
func platform(logger zerolog.Logger, files chan<- shared.File) error {
|
||||
k, ok := kclient.GetDefaultFactory().Client()
|
||||
if !ok {
|
||||
return errors.Errorf("Client is not initialised")
|
||||
}
|
||||
|
||||
if err := platformArangoPlatformStorages(logger, files, k); err != nil {
|
||||
logger.Err(err).Msgf("Error while collecting platform arango storages")
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
pkg/debug_package/generators/kubernetes/arango_platform_s.go (new file, 73 lines)
|
@ -0,0 +1,73 @@
|
|||
//
|
||||
// DISCLAIMER
|
||||
//
|
||||
// Copyright 2024 ArangoDB GmbH, Cologne, Germany
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// Copyright holder is ArangoDB GmbH, Cologne, Germany
|
||||
//
|
||||
|
||||
package kubernetes
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
|
||||
platformApi "github.com/arangodb/kube-arangodb/pkg/apis/platform/v1alpha1"
|
||||
"github.com/arangodb/kube-arangodb/pkg/debug_package/cli"
|
||||
"github.com/arangodb/kube-arangodb/pkg/debug_package/shared"
|
||||
"github.com/arangodb/kube-arangodb/pkg/util/errors"
|
||||
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/kerrors"
|
||||
"github.com/arangodb/kube-arangodb/pkg/util/kclient"
|
||||
)
|
||||
|
||||
func platformArangoPlatformStorages(logger zerolog.Logger, files chan<- shared.File, client kclient.Client) error {
|
||||
arangoPlatformStorages, err := listPlatformArangoPlatformStorages(client)
|
||||
if err != nil {
|
||||
if kerrors.IsForbiddenOrNotFound(err) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
if err := errors.ExecuteWithErrorArrayP2(platformArangoPlatformStorage, client, files, arangoPlatformStorages...); err != nil {
|
||||
logger.Err(err).Msgf("Error while collecting platform arango storages")
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func platformArangoPlatformStorage(client kclient.Client, files chan<- shared.File, ext *platformApi.ArangoPlatformStorage) error {
|
||||
files <- shared.NewYAMLFile(fmt.Sprintf("kubernetes/arango/platform/arangoplatformstorages/%s.yaml", ext.GetName()), func() ([]interface{}, error) {
|
||||
return []interface{}{ext}, nil
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func listPlatformArangoPlatformStorages(client kclient.Client) ([]*platformApi.ArangoPlatformStorage, error) {
|
||||
return ListObjects[*platformApi.ArangoPlatformStorageList, *platformApi.ArangoPlatformStorage](context.Background(), client.Arango().PlatformV1alpha1().ArangoPlatformStorages(cli.GetInput().Namespace), func(result *platformApi.ArangoPlatformStorageList) []*platformApi.ArangoPlatformStorage {
|
||||
q := make([]*platformApi.ArangoPlatformStorage, len(result.Items))
|
||||
|
||||
for id, e := range result.Items {
|
||||
q[id] = e.DeepCopy()
|
||||
}
|
||||
|
||||
return q
|
||||
})
|
||||
}
|
|
@ -30,6 +30,7 @@ import (
|
|||
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
|
||||
platformApi "github.com/arangodb/kube-arangodb/pkg/apis/platform/v1alpha1"
|
||||
schedulerApi "github.com/arangodb/kube-arangodb/pkg/apis/scheduler/v1beta1"
|
||||
schedulerContainerApi "github.com/arangodb/kube-arangodb/pkg/apis/scheduler/v1beta1/container"
|
||||
schedulerContainerResourcesApi "github.com/arangodb/kube-arangodb/pkg/apis/scheduler/v1beta1/container/resources"
|
||||
|
@ -75,12 +76,17 @@ func (r *Resources) EnsureArangoProfiles(ctx context.Context, cachedStatus inspe
|
|||
|
||||
reconcileRequired := k8sutil.NewReconcile(cachedStatus)
|
||||
|
||||
gen := func(name, version string, integrations ...sidecar.Integration) func() (string, *schedulerApi.ArangoProfile, error) {
|
||||
gen := func(name, version string, generator func() (sidecar.Integration, bool)) func() (string, *schedulerApi.ArangoProfile, error) {
|
||||
return func() (string, *schedulerApi.ArangoProfile, error) {
|
||||
counterMetric.Inc()
|
||||
fullName := fmt.Sprintf("%s-int-%s-%s", deploymentName, name, version)
|
||||
|
||||
integration, err := sidecar.NewIntegrationEnablement(integrations...)
|
||||
intgr, exists := generator()
|
||||
if !exists {
|
||||
return fullName, nil, nil
|
||||
}
|
||||
|
||||
integration, err := sidecar.NewIntegrationEnablement(intgr)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
@ -106,6 +112,12 @@ func (r *Resources) EnsureArangoProfiles(ctx context.Context, cachedStatus inspe
|
|||
}
|
||||
}
|
||||
|
||||
always := func(in sidecar.Integration) func() (sidecar.Integration, bool) {
|
||||
return func() (sidecar.Integration, bool) {
|
||||
return in, true
|
||||
}
|
||||
}
|
||||
|
||||
if changed, err := r.ensureArangoProfilesFactory(ctx, cachedStatus,
|
||||
func() (string, *schedulerApi.ArangoProfile, error) {
|
||||
counterMetric.Inc()
|
||||
|
@ -134,12 +146,24 @@ func (r *Resources) EnsureArangoProfiles(ctx context.Context, cachedStatus inspe
|
|||
},
|
||||
}, nil
|
||||
},
|
||||
gen(constants.ProfilesIntegrationAuthz, constants.ProfilesIntegrationV0, sidecar.IntegrationAuthorizationV0{}),
|
||||
gen(constants.ProfilesIntegrationAuthn, constants.ProfilesIntegrationV1, sidecar.IntegrationAuthenticationV1{Spec: spec, DeploymentName: apiObject.GetName()}),
|
||||
gen(constants.ProfilesIntegrationSched, constants.ProfilesIntegrationV1, sidecar.IntegrationSchedulerV1{}),
|
||||
gen(constants.ProfilesIntegrationSched, constants.ProfilesIntegrationV2, sidecar.IntegrationSchedulerV2{}),
|
||||
gen(constants.ProfilesIntegrationEnvoy, constants.ProfilesIntegrationV3, sidecar.IntegrationEnvoyV3{Spec: spec}),
|
||||
); err != nil {
|
||||
gen(constants.ProfilesIntegrationAuthz, constants.ProfilesIntegrationV0, always(sidecar.IntegrationAuthorizationV0{})),
|
||||
gen(constants.ProfilesIntegrationAuthn, constants.ProfilesIntegrationV1, always(sidecar.IntegrationAuthenticationV1{Spec: spec, DeploymentName: apiObject.GetName()})),
|
||||
gen(constants.ProfilesIntegrationSched, constants.ProfilesIntegrationV1, always(sidecar.IntegrationSchedulerV1{})),
|
||||
gen(constants.ProfilesIntegrationSched, constants.ProfilesIntegrationV2, always(sidecar.IntegrationSchedulerV2{})),
|
||||
gen(constants.ProfilesIntegrationEnvoy, constants.ProfilesIntegrationV3, always(sidecar.IntegrationEnvoyV3{Spec: spec})),
|
||||
gen(constants.ProfilesIntegrationStorage, constants.ProfilesIntegrationV2, func() (sidecar.Integration, bool) {
|
||||
if v, err := cachedStatus.ArangoPlatformStorage().V1Alpha1(); err == nil {
|
||||
if p, ok := v.GetSimple(deploymentName); ok {
|
||||
if p.Status.Conditions.IsTrue(platformApi.ReadyCondition) {
|
||||
return sidecar.IntegrationStorageV2{
|
||||
Storage: p,
|
||||
}, true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil, false
|
||||
})); err != nil {
|
||||
return err
|
||||
} else if changed {
|
||||
reconcileRequired.Required()
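The profile generators now return (sidecar.Integration, bool) rather than taking a fixed list of integrations, so a profile can be skipped when its precondition does not hold; always(...) adapts the unconditional integrations to the new shape, and a nil expected profile is tolerated by ensureArangoProfile via the expected != nil guard added further down. The storage profile is the first conditional one: it is only emitted when a Ready ArangoPlatformStorage for the deployment exists. In isolation the pattern looks roughly like this (illustrative names, not the operator's API):

type integrationGen func() (sidecar.Integration, bool)

func always(in sidecar.Integration) integrationGen {
	return func() (sidecar.Integration, bool) { return in, true }
}

func whenStorageReady(lookup func() (*platformApi.ArangoPlatformStorage, bool)) integrationGen {
	return func() (sidecar.Integration, bool) {
		if p, ok := lookup(); ok && p.Status.Conditions.IsTrue(platformApi.ReadyCondition) {
			return sidecar.IntegrationStorageV2{Storage: p}, true
		}
		return nil, false // no Ready storage: the profile is not generated
	}
}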
|
||||
|
@ -213,7 +237,7 @@ func (r *Resources) ensureArangoProfilesFactory(ctx context.Context, cachedStatu
|
|||
func (r *Resources) ensureArangoProfile(ctx context.Context, cachedStatus inspectorInterface.Inspector, name string, expected *schedulerApi.ArangoProfile) (bool, error) {
|
||||
arangoProfiles := cachedStatus.ArangoProfileModInterface().V1Beta1()
|
||||
|
||||
if expected.GetName() != name {
|
||||
if expected != nil && expected.GetName() != name {
|
||||
return false, errors.Errorf("Name mismatch")
|
||||
}
|
||||
|
||||
|
|
|
@ -634,6 +634,7 @@ func (r *Resources) createPodForMember(ctx context.Context, cachedStatus inspect
|
|||
}
|
||||
// Create event
|
||||
r.context.CreateEvent(k8sutil.NewPodCreatedEvent(m.Pod.GetName(), role, apiObject))
|
||||
cachedStatus.GetThrottles().Pod().Invalidate()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -777,6 +778,10 @@ func (r *Resources) EnsurePods(ctx context.Context, cachedStatus inspectorInterf
|
|||
|
||||
log.Warn("Ensuring pod")
|
||||
|
||||
if err := cachedStatus.Refresh(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
spec := r.context.GetSpec()
|
||||
if err := r.createPodForMember(ctx, cachedStatus, spec, member, m.ID, imageNotFoundOnce); err != nil {
|
||||
log.Err(err).Warn("Ensuring pod failed")
|
||||
|
|
|
@ -28,50 +28,30 @@ import (
|
|||
|
||||
networkingApi "github.com/arangodb/kube-arangodb/pkg/apis/networking/v1alpha1"
|
||||
platformApi "github.com/arangodb/kube-arangodb/pkg/apis/platform/v1alpha1"
|
||||
sharedApi "github.com/arangodb/kube-arangodb/pkg/apis/shared/v1"
|
||||
operator "github.com/arangodb/kube-arangodb/pkg/operatorV2"
|
||||
"github.com/arangodb/kube-arangodb/pkg/operatorV2/operation"
|
||||
"github.com/arangodb/kube-arangodb/pkg/util"
|
||||
)
|
||||
|
||||
func (h *handler) HandleArangoDeployment(ctx context.Context, item operation.Item, extension *platformApi.ArangoPlatformStorage, status *platformApi.ArangoPlatformStorageStatus) (bool, error) {
|
||||
var name = util.WithDefault(extension.Spec.Deployment)
|
||||
|
||||
if status.Deployment != nil {
|
||||
name = status.Deployment.GetName()
|
||||
}
|
||||
|
||||
deployment, err := util.WithKubernetesContextTimeoutP2A2(ctx, h.client.DatabaseV1().ArangoDeployments(item.Namespace).Get, name, meta.GetOptions{})
|
||||
deployment, err := util.WithKubernetesContextTimeoutP2A2(ctx, h.client.DatabaseV1().ArangoDeployments(item.Namespace).Get, extension.GetName(), meta.GetOptions{})
|
||||
if err != nil {
|
||||
if apiErrors.IsNotFound(err) {
|
||||
// Condition for Found should be set to false
|
||||
if util.Or(
|
||||
status.Conditions.Update(networkingApi.DeploymentFoundCondition, false, "ArangoDeployment not found", "ArangoDeployment not found"),
|
||||
status.Conditions.Update(platformApi.DeploymentFoundCondition, false, "ArangoDeployment not found", "ArangoDeployment not found"),
|
||||
) {
|
||||
return true, operator.Reconcile("Conditions updated")
|
||||
}
|
||||
return false, nil
|
||||
return false, operator.Stop("Deployment Not Found")
|
||||
}
|
||||
|
||||
return false, err
|
||||
}
|
||||
|
||||
if status.Deployment == nil {
|
||||
status.Deployment = util.NewType(sharedApi.NewObject(deployment))
|
||||
return true, operator.Reconcile("Deployment saved")
|
||||
} else if !status.Deployment.Equals(deployment) {
|
||||
if util.Or(
|
||||
status.Conditions.Update(networkingApi.DeploymentFoundCondition, false, "ArangoDeployment changed", "ArangoDeployment changed"),
|
||||
) {
|
||||
return true, operator.Reconcile("Conditions updated")
|
||||
}
|
||||
|
||||
return false, operator.Stop("ArangoDeployment Changed")
|
||||
}
|
||||
|
||||
// Condition for Found should be set to true
|
||||
|
||||
if status.Conditions.Update(networkingApi.DeploymentFoundCondition, true, "ArangoDeployment found", "ArangoDeployment found") {
|
||||
if status.Conditions.UpdateWithHash(networkingApi.DeploymentFoundCondition, true, "ArangoDeployment found", "ArangoDeployment found", string(deployment.GetUID())) {
|
||||
return true, operator.Reconcile("Conditions updated")
|
||||
}
|
||||
|
||||
|
|
|
@ -25,9 +25,12 @@ import (
|
|||
|
||||
"github.com/stretchr/testify/require"
|
||||
batch "k8s.io/api/batch/v1"
|
||||
core "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
|
||||
schedulerApi "github.com/arangodb/kube-arangodb/pkg/apis/scheduler/v1beta1"
|
||||
schedulerPodApi "github.com/arangodb/kube-arangodb/pkg/apis/scheduler/v1beta1/pod"
|
||||
schedulerPodResourcesApi "github.com/arangodb/kube-arangodb/pkg/apis/scheduler/v1beta1/pod/resources"
|
||||
"github.com/arangodb/kube-arangodb/pkg/operatorV2/operation"
|
||||
"github.com/arangodb/kube-arangodb/pkg/util"
|
||||
"github.com/arangodb/kube-arangodb/pkg/util/tests"
|
||||
|
@ -205,7 +208,20 @@ func Test_Handler_Profile(t *testing.T) {
|
|||
handler := newFakeHandler()
|
||||
|
||||
// Arrange
|
||||
profile := tests.NewMetaObject[*schedulerApi.ArangoProfile](t, tests.FakeNamespace, "test", tests.MarkArangoProfileAsReady)
|
||||
profile := tests.NewMetaObject[*schedulerApi.ArangoProfile](t, tests.FakeNamespace, "test", func(t *testing.T, obj *schedulerApi.ArangoProfile) {
|
||||
obj.Spec.Template = &schedulerApi.ProfileTemplate{
|
||||
Pod: &schedulerPodApi.Pod{
|
||||
Volumes: &schedulerPodResourcesApi.Volumes{
|
||||
Volumes: []core.Volume{
|
||||
{
|
||||
Name: "test",
|
||||
VolumeSource: core.VolumeSource{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}, tests.MarkArangoProfileAsReady)
|
||||
extension := tests.NewMetaObject[*schedulerApi.ArangoSchedulerBatchJob](t, tests.FakeNamespace, "test",
|
||||
func(t *testing.T, obj *schedulerApi.ArangoSchedulerBatchJob) {
|
||||
obj.Spec.Profiles = []string{profile.GetName()}
|
||||
|
@ -227,4 +243,9 @@ func Test_Handler_Profile(t *testing.T) {
|
|||
|
||||
require.Len(t, extension.Status.Profiles, 1)
|
||||
require.Equal(t, profile.GetName(), extension.Status.Profiles[0])
|
||||
|
||||
require.Len(t, extension.Status.Profiles, 1)
|
||||
require.Equal(t, profile.GetName(), extension.Status.Profiles[0])
|
||||
require.Len(t, batchJob.Spec.Template.Spec.Volumes, 1)
|
||||
require.EqualValues(t, "test", batchJob.Spec.Template.Spec.Volumes[0].Name)
|
||||
}
|
||||
|
|
|
@ -29,6 +29,8 @@ import (
|
|||
"k8s.io/apimachinery/pkg/types"
|
||||
|
||||
schedulerApi "github.com/arangodb/kube-arangodb/pkg/apis/scheduler/v1beta1"
|
||||
schedulerPodApi "github.com/arangodb/kube-arangodb/pkg/apis/scheduler/v1beta1/pod"
|
||||
schedulerPodResourcesApi "github.com/arangodb/kube-arangodb/pkg/apis/scheduler/v1beta1/pod/resources"
|
||||
"github.com/arangodb/kube-arangodb/pkg/operatorV2/operation"
|
||||
"github.com/arangodb/kube-arangodb/pkg/util"
|
||||
"github.com/arangodb/kube-arangodb/pkg/util/tests"
|
||||
|
@ -206,7 +208,20 @@ func Test_Handler_Profile(t *testing.T) {
|
|||
handler := newFakeHandler()
|
||||
|
||||
// Arrange
|
||||
profile := tests.NewMetaObject[*schedulerApi.ArangoProfile](t, tests.FakeNamespace, "test", tests.MarkArangoProfileAsReady)
|
||||
profile := tests.NewMetaObject[*schedulerApi.ArangoProfile](t, tests.FakeNamespace, "test", func(t *testing.T, obj *schedulerApi.ArangoProfile) {
|
||||
obj.Spec.Template = &schedulerApi.ProfileTemplate{
|
||||
Pod: &schedulerPodApi.Pod{
|
||||
Volumes: &schedulerPodResourcesApi.Volumes{
|
||||
Volumes: []core.Volume{
|
||||
{
|
||||
Name: "test",
|
||||
VolumeSource: core.VolumeSource{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}, tests.MarkArangoProfileAsReady)
|
||||
extension := tests.NewMetaObject[*schedulerApi.ArangoSchedulerCronJob](t, tests.FakeNamespace, "test",
|
||||
func(t *testing.T, obj *schedulerApi.ArangoSchedulerCronJob) {
|
||||
obj.Spec.Profiles = []string{profile.GetName()}
|
||||
|
@ -228,4 +243,9 @@ func Test_Handler_Profile(t *testing.T) {
|
|||
|
||||
require.Len(t, extension.Status.Profiles, 1)
|
||||
require.Equal(t, profile.GetName(), extension.Status.Profiles[0])
|
||||
|
||||
require.Len(t, extension.Status.Profiles, 1)
|
||||
require.Equal(t, profile.GetName(), extension.Status.Profiles[0])
|
||||
require.Len(t, cronJob.Spec.JobTemplate.Spec.Template.Spec.Volumes, 1)
|
||||
require.EqualValues(t, "test", cronJob.Spec.JobTemplate.Spec.Template.Spec.Volumes[0].Name)
|
||||
}
|
||||
|
|
|
@@ -148,8 +148,8 @@ func (h *handler) HandleObject(ctx context.Context, item operation.Item, extensi
 			Labels:      extension.ObjectMeta.Labels,
 			Annotations: extension.ObjectMeta.Annotations,
 		}
-		extension.Spec.DeploymentSpec.DeepCopyInto(&obj.Spec)
-		extension.Spec.DeploymentSpec.Template.DeepCopyInto(&obj.Spec.Template)
+		deploymentTemplate.Spec.DeepCopyInto(&obj.Spec)
+		deploymentTemplate.Spec.Template.DeepCopyInto(&obj.Spec.Template)

 		obj.OwnerReferences = append(obj.OwnerReferences, extension.AsOwner())

@@ -25,9 +25,12 @@ import (
 	"github.com/stretchr/testify/require"
 	apps "k8s.io/api/apps/v1"
+	core "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"

 	schedulerApi "github.com/arangodb/kube-arangodb/pkg/apis/scheduler/v1beta1"
+	schedulerPodApi "github.com/arangodb/kube-arangodb/pkg/apis/scheduler/v1beta1/pod"
+	schedulerPodResourcesApi "github.com/arangodb/kube-arangodb/pkg/apis/scheduler/v1beta1/pod/resources"
 	"github.com/arangodb/kube-arangodb/pkg/operatorV2/operation"
 	"github.com/arangodb/kube-arangodb/pkg/util"
 	"github.com/arangodb/kube-arangodb/pkg/util/tests"

@@ -202,10 +205,24 @@ func Test_Handler_Profile(t *testing.T) {
 	handler := newFakeHandler()

 	// Arrange
-	profile := tests.NewMetaObject[*schedulerApi.ArangoProfile](t, tests.FakeNamespace, "test", tests.MarkArangoProfileAsReady)
+	profile := tests.NewMetaObject[*schedulerApi.ArangoProfile](t, tests.FakeNamespace, "test", func(t *testing.T, obj *schedulerApi.ArangoProfile) {
+		obj.Spec.Template = &schedulerApi.ProfileTemplate{
+			Pod: &schedulerPodApi.Pod{
+				Volumes: &schedulerPodResourcesApi.Volumes{
+					Volumes: []core.Volume{
+						{
+							Name:         "test",
+							VolumeSource: core.VolumeSource{},
+						},
+					},
+				},
+			},
+		}
+	}, tests.MarkArangoProfileAsReady)
 	extension := tests.NewMetaObject[*schedulerApi.ArangoSchedulerDeployment](t, tests.FakeNamespace, "test",
 		func(t *testing.T, obj *schedulerApi.ArangoSchedulerDeployment) {
 			obj.Spec.Profiles = []string{profile.GetName()}
 			obj.Spec.DeploymentSpec.Replicas = util.NewType[int32](10)
 		})
 	deployment := tests.NewMetaObject[*apps.Deployment](t, tests.FakeNamespace, "test")

@@ -222,6 +239,11 @@ func Test_Handler_Profile(t *testing.T) {
 	tests.RefreshObjects(t, handler.kubeClient, handler.client, &deployment)
 	require.NotNil(t, deployment)

 	require.NotNil(t, deployment.Spec.Replicas)
 	require.EqualValues(t, 10, *deployment.Spec.Replicas)
+
+	require.Len(t, extension.Status.Profiles, 1)
+	require.Equal(t, profile.GetName(), extension.Status.Profiles[0])
+	require.Len(t, deployment.Spec.Template.Spec.Volumes, 1)
+	require.EqualValues(t, "test", deployment.Spec.Template.Spec.Volumes[0].Name)
 }

@@ -28,6 +28,8 @@ import (
 	"k8s.io/apimachinery/pkg/types"

 	schedulerApi "github.com/arangodb/kube-arangodb/pkg/apis/scheduler/v1beta1"
+	schedulerPodApi "github.com/arangodb/kube-arangodb/pkg/apis/scheduler/v1beta1/pod"
+	schedulerPodResourcesApi "github.com/arangodb/kube-arangodb/pkg/apis/scheduler/v1beta1/pod/resources"
 	"github.com/arangodb/kube-arangodb/pkg/operatorV2/operation"
 	"github.com/arangodb/kube-arangodb/pkg/util"
 	"github.com/arangodb/kube-arangodb/pkg/util/tests"

@@ -202,7 +204,20 @@ func Test_Handler_Profile(t *testing.T) {
 	handler := newFakeHandler()

 	// Arrange
-	profile := tests.NewMetaObject[*schedulerApi.ArangoProfile](t, tests.FakeNamespace, "test", tests.MarkArangoProfileAsReady)
+	profile := tests.NewMetaObject[*schedulerApi.ArangoProfile](t, tests.FakeNamespace, "test", func(t *testing.T, obj *schedulerApi.ArangoProfile) {
+		obj.Spec.Template = &schedulerApi.ProfileTemplate{
+			Pod: &schedulerPodApi.Pod{
+				Volumes: &schedulerPodResourcesApi.Volumes{
+					Volumes: []core.Volume{
+						{
+							Name:         "test",
+							VolumeSource: core.VolumeSource{},
+						},
+					},
+				},
+			},
+		}
+	}, tests.MarkArangoProfileAsReady)
 	extension := tests.NewMetaObject[*schedulerApi.ArangoSchedulerPod](t, tests.FakeNamespace, "test",
 		func(t *testing.T, obj *schedulerApi.ArangoSchedulerPod) {
 			obj.Spec.Profiles = []string{profile.GetName()}

@@ -224,4 +239,9 @@ func Test_Handler_Profile(t *testing.T) {

 	require.Len(t, extension.Status.Profiles, 1)
 	require.Equal(t, profile.GetName(), extension.Status.Profiles[0])
+
+	require.Len(t, extension.Status.Profiles, 1)
+	require.Equal(t, profile.GetName(), extension.Status.Profiles[0])
+	require.Len(t, pod.Spec.Volumes, 1)
+	require.EqualValues(t, "test", pod.Spec.Volumes[0].Name)
 }

@@ -36,12 +36,14 @@ import (

 func NewFlagEnvHandler(fs *flag.FlagSet) FlagEnvHandler {
 	return flagEnvHandler{
-		fs: fs,
+		fs:      fs,
+		visible: true,
 	}
 }

 type FlagEnvHandler interface {
 	WithPrefix(prefix string) FlagEnvHandler
+	WithVisibility(visible bool) FlagEnvHandler

 	StringVar(p *string, name string, value string, usage string) error
 	String(name string, value string, usage string) error

@@ -60,8 +62,9 @@ type FlagEnvHandler interface {
 }

 type flagEnvHandler struct {
-	prefix string
-	fs     *flag.FlagSet
+	prefix  string
+	visible bool
+	fs      *flag.FlagSet
 }

 func (f flagEnvHandler) StringVar(p *string, name string, value string, usage string) error {

@@ -70,7 +73,15 @@ func (f flagEnvHandler) StringVar(p *string, name string, value string, usage st
 		return err
 	}

-	f.fs.StringVar(p, f.name(name), v, f.varDesc(name, usage))
+	fname := f.name(name)
+
+	f.fs.StringVar(p, fname, v, f.varDesc(name, usage))
+
+	if !f.visible {
+		if err := f.fs.MarkHidden(fname); err != nil {
+			return err
+		}
+	}

 	return nil
 }

@@ -81,7 +92,15 @@ func (f flagEnvHandler) String(name string, value string, usage string) error {
 		return err
 	}

-	f.fs.String(f.name(name), v, f.varDesc(name, usage))
+	fname := f.name(name)
+
+	f.fs.String(fname, v, f.varDesc(name, usage))
+
+	if !f.visible {
+		if err := f.fs.MarkHidden(fname); err != nil {
+			return err
+		}
+	}

 	return nil
 }

@@ -92,7 +111,15 @@ func (f flagEnvHandler) StringSliceVar(p *[]string, name string, value []string,
 		return err
 	}

-	f.fs.StringSliceVar(p, f.name(name), v, f.varDesc(name, usage))
+	fname := f.name(name)
+
+	f.fs.StringSliceVar(p, fname, v, f.varDesc(name, usage))
+
+	if !f.visible {
+		if err := f.fs.MarkHidden(fname); err != nil {
+			return err
+		}
+	}

 	return nil
 }

@@ -103,7 +130,15 @@ func (f flagEnvHandler) StringSlice(name string, value []string, usage string) e
 		return err
 	}

-	f.fs.StringSlice(f.name(name), v, f.varDesc(name, usage))
+	fname := f.name(name)
+
+	f.fs.StringSlice(fname, v, f.varDesc(name, usage))
+
+	if !f.visible {
+		if err := f.fs.MarkHidden(fname); err != nil {
+			return err
+		}
+	}

 	return nil
 }

@@ -114,7 +149,15 @@ func (f flagEnvHandler) BoolVar(p *bool, name string, value bool, usage string)
 		return err
 	}

-	f.fs.BoolVar(p, f.name(name), v, f.varDesc(name, usage))
+	fname := f.name(name)
+
+	f.fs.BoolVar(p, fname, v, f.varDesc(name, usage))
+
+	if !f.visible {
+		if err := f.fs.MarkHidden(fname); err != nil {
+			return err
+		}
+	}

 	return nil
 }

@@ -125,7 +168,15 @@ func (f flagEnvHandler) Bool(name string, value bool, usage string) error {
 		return err
 	}

-	f.fs.Bool(f.name(name), v, f.varDesc(name, usage))
+	fname := f.name(name)
+
+	f.fs.Bool(fname, v, f.varDesc(name, usage))
+
+	if !f.visible {
+		if err := f.fs.MarkHidden(fname); err != nil {
+			return err
+		}
+	}

 	return nil
 }

@@ -136,7 +187,15 @@ func (f flagEnvHandler) DurationVar(p *time.Duration, name string, value time.Du
 		return err
 	}

-	f.fs.DurationVar(p, f.name(name), v, f.varDesc(name, usage))
+	fname := f.name(name)
+
+	f.fs.DurationVar(p, fname, v, f.varDesc(name, usage))
+
+	if !f.visible {
+		if err := f.fs.MarkHidden(fname); err != nil {
+			return err
+		}
+	}

 	return nil
 }

@@ -147,7 +206,15 @@ func (f flagEnvHandler) Duration(name string, value time.Duration, usage string)
 		return err
 	}

-	f.fs.Duration(f.name(name), v, f.varDesc(name, usage))
+	fname := f.name(name)
+
+	f.fs.Duration(fname, v, f.varDesc(name, usage))
+
+	if !f.visible {
+		if err := f.fs.MarkHidden(fname); err != nil {
+			return err
+		}
+	}

 	return nil
 }

@@ -158,7 +225,15 @@ func (f flagEnvHandler) Uint16Var(p *uint16, name string, value uint16, usage st
 		return err
 	}

-	f.fs.Uint16Var(p, f.name(name), v, f.varDesc(name, usage))
+	fname := f.name(name)
+
+	f.fs.Uint16Var(p, fname, v, f.varDesc(name, usage))
+
+	if !f.visible {
+		if err := f.fs.MarkHidden(fname); err != nil {
+			return err
+		}
+	}

 	return nil
 }

@@ -169,7 +244,15 @@ func (f flagEnvHandler) Uint16(name string, value uint16, usage string) error {
 		return err
 	}

-	f.fs.Uint16(f.name(name), v, f.varDesc(name, usage))
+	fname := f.name(name)
+
+	f.fs.Uint16(fname, v, f.varDesc(name, usage))
+
+	if !f.visible {
+		if err := f.fs.MarkHidden(fname); err != nil {
+			return err
+		}
+	}

 	return nil
 }

@@ -198,8 +281,17 @@ func (f flagEnvHandler) name(n string) string {

 func (f flagEnvHandler) WithPrefix(prefix string) FlagEnvHandler {
 	return flagEnvHandler{
-		prefix: f.name(prefix),
-		fs:     f.fs,
+		prefix:  f.name(prefix),
+		fs:      f.fs,
+		visible: f.visible,
 	}
 }
+
+func (f flagEnvHandler) WithVisibility(visible bool) FlagEnvHandler {
+	return flagEnvHandler{
+		prefix:  f.prefix,
+		fs:      f.fs,
+		visible: visible,
+	}
+}

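For orientation, a minimal usage sketch of the visibility chain added above; this is not part of the commit. It assumes the example sits in the same package as NewFlagEnvHandler, that the `flag` alias refers to github.com/spf13/pflag (whose FlagSet provides MarkHidden and StringSliceVar as used here), and that exampleHiddenFlags and the flag values are purely illustrative names.

// Sketch only: a hidden, prefixed flag registered through the handler.
func exampleHiddenFlags() (*flag.FlagSet, error) {
	fs := flag.NewFlagSet("example", flag.ContinueOnError)

	// Everything registered through this handler is prefixed and hidden from --help,
	// mirroring what configuration.Register now does for invisible integrations.
	h := NewFlagEnvHandler(fs).
		WithPrefix("integration.storage.v2").
		WithVisibility(false)

	var endpoint string
	if err := h.StringVar(&endpoint, "s3.endpoint", "", "Endpoint of S3 API implementation"); err != nil {
		return nil, err
	}

	// The flag still parses normally; MarkHidden only removes it from help output.
	return fs, nil
}
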
@@ -40,6 +40,20 @@ type Integration interface {
 	Handler(ctx context.Context, cmd *cobra.Command) (svc.Handler, error)
 }

+type IntegrationVisibility interface {
+	Integration
+
+	Visible() bool
+}
+
+func GetIntegrationVisibility(in Integration) bool {
+	if v, ok := in.(IntegrationVisibility); ok {
+		return v.Visible()
+	}
+
+	return true
+}
+
 type IntegrationEnablement interface {
 	Integration

@@ -151,7 +151,7 @@ func (c *configuration) Register(cmd *cobra.Command) error {
 	for _, service := range c.registered {
 		prefix := fmt.Sprintf("integration.%s", service.Name())

-		fs := f.WithPrefix(prefix)
+		fs := f.WithPrefix(prefix).WithVisibility(GetIntegrationVisibility(service))
 		internal, external := GetIntegrationEnablement(service)

 		if err := errors.Errors(

pkg/integrations/sidecar/integration.storage.v2.go (new file, 173 lines)

@@ -0,0 +1,173 @@
//
// DISCLAIMER
//
// Copyright 2024 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//

package sidecar

import (
	"net/url"
	"path/filepath"
	"strconv"

	core "k8s.io/api/core/v1"

	platformApi "github.com/arangodb/kube-arangodb/pkg/apis/platform/v1alpha1"
	"github.com/arangodb/kube-arangodb/pkg/ml/storage"
	"github.com/arangodb/kube-arangodb/pkg/util/aws"
	"github.com/arangodb/kube-arangodb/pkg/util/constants"
	"github.com/arangodb/kube-arangodb/pkg/util/errors"
	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
)

const (
	mountNameStorageCredentials = "integration-credentials"
	mountNameStorageCA          = "integration-ca"

	mountPathStorageCredentials = "/secrets/credentials"
	mountPathStorageCA          = "/secrets/ca"
)

type IntegrationStorageV2 struct {
	Core    *Core
	Storage *platformApi.ArangoPlatformStorage
}

func (i IntegrationStorageV2) Name() []string {
	return []string{"STORAGE", "V2"}
}

func (i IntegrationStorageV2) Validate() error {
	if i.Storage == nil {
		return errors.Errorf("Storage is nil")
	}

	if err := i.Storage.Spec.Validate(); err != nil {
		return errors.Wrap(err, "Storage failed")
	}

	if !i.Storage.Status.Conditions.IsTrue(platformApi.ReadyCondition) {
		return errors.Errorf("Storage is not Ready")
	}

	return nil
}

func (i IntegrationStorageV2) Envs() ([]core.EnvVar, error) {
	var envs = []core.EnvVar{
		{
			Name:  "INTEGRATION_STORAGE_V2",
			Value: "true",
		},
	}

	if s3 := i.Storage.Spec.GetBackend().GetS3(); s3 != nil {
		endpointURL, _ := url.Parse(s3.GetEndpoint())
		disableSSL := endpointURL.Scheme == "http"

		envs = append(envs,
			core.EnvVar{
				Name:  "INTEGRATION_STORAGE_V2_TYPE",
				Value: string(storage.S3),
			},
			core.EnvVar{
				Name:  "INTEGRATION_STORAGE_V2_S3_ENDPOINT",
				Value: s3.GetEndpoint(),
			},
			core.EnvVar{
				Name:  "INTEGRATION_STORAGE_V2_S3_ALLOW_INSECURE",
				Value: strconv.FormatBool(s3.GetAllowInsecure()),
			},
			core.EnvVar{
				Name:  "INTEGRATION_STORAGE_V2_S3_DISABLE_SSL",
				Value: strconv.FormatBool(disableSSL),
			},
			core.EnvVar{
				Name:  "INTEGRATION_STORAGE_V2_S3_REGION",
				Value: s3.GetRegion(),
			},
			core.EnvVar{
				Name:  "INTEGRATION_STORAGE_V2_S3_BUCKET_NAME",
				Value: s3.GetBucketName(),
			},
			core.EnvVar{
				Name:  "INTEGRATION_STORAGE_V2_S3_BUCKET_PREFIX",
				Value: s3.GetBucketPrefix(),
			},
			core.EnvVar{
				Name:  "INTEGRATION_STORAGE_V2_S3_PROVIDER_TYPE",
				Value: string(aws.ProviderTypeFile),
			},
			core.EnvVar{
				Name:  "INTEGRATION_STORAGE_V2_S3_PROVIDER_FILE_SECRET_KEY",
				Value: filepath.Join(mountPathStorageCredentials, constants.SecretCredentialsSecretKey),
			},
			core.EnvVar{
				Name:  "INTEGRATION_STORAGE_V2_S3_PROVIDER_FILE_ACCESS_KEY",
				Value: filepath.Join(mountPathStorageCredentials, constants.SecretCredentialsAccessKey),
			},
		)

		if !s3.GetCASecret().IsEmpty() {
			envs = append(envs,
				core.EnvVar{
					Name:  "INTEGRATION_STORAGE_V2_S3_CA",
					Value: filepath.Join(mountPathStorageCA, constants.SecretCACertificate),
				},
			)
		}
	}

	return i.Core.Envs(i, envs...), nil
}

func (i IntegrationStorageV2) GlobalEnvs() ([]core.EnvVar, error) {
	return nil, nil
}

func (i IntegrationStorageV2) Volumes() ([]core.Volume, []core.VolumeMount, error) {
	var volumeMounts []core.VolumeMount
	var volumes []core.Volume

	if s := i.Storage.Spec.GetBackend().GetS3(); s != nil {
		secretObj := s.GetCredentialsSecret()
		if secretObj.GetNamespace(i.Storage) != i.Storage.GetNamespace() {
			return nil, nil, errors.New("secrets from different namespace are not supported yet")
		}
		volumes = append(volumes, k8sutil.CreateVolumeWithSecret(mountNameStorageCredentials, secretObj.GetName()))
		volumeMounts = append(volumeMounts, core.VolumeMount{
			Name:      mountNameStorageCredentials,
			MountPath: mountPathStorageCredentials,
		})

		if caSecret := s.GetCASecret(); !caSecret.IsEmpty() {
			if caSecret.GetNamespace(i.Storage) != i.Storage.GetNamespace() {
				return nil, nil, errors.New("secrets from different namespace are not supported yet")
			}
			volumes = append(volumes, k8sutil.CreateVolumeWithSecret(mountNameStorageCA, caSecret.GetName()))
			volumeMounts = append(volumeMounts, core.VolumeMount{
				Name:      mountNameStorageCA,
				MountPath: mountPathStorageCA,
			})
		}
	}

	return volumes, volumeMounts, nil
}

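For orientation, a minimal sketch (not part of this commit) of how a caller could wire the integration's output into a sidecar container. It assumes the snippet compiles inside package sidecar, that the IntegrationStorageV2 value was built with a non-nil Core and a Ready ArangoPlatformStorage, and that applyStorageIntegration is an illustrative name.

// Sketch only: attach the storage integration's env vars and secret mounts to a container.
func applyStorageIntegration(i IntegrationStorageV2, c *core.Container, pod *core.PodSpec) error {
	if err := i.Validate(); err != nil {
		return err
	}

	envs, err := i.Envs()
	if err != nil {
		return err
	}

	volumes, mounts, err := i.Volumes()
	if err != nil {
		return err
	}

	// Wire the generated environment plus the credentials/CA secret mounts into the pod.
	c.Env = append(c.Env, envs...)
	c.VolumeMounts = append(c.VolumeMounts, mounts...)
	pod.Volumes = append(pod.Volumes, volumes...)

	return nil
}
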
@@ -70,3 +70,7 @@ func (b *storageV1) Handler(ctx context.Context, cmd *cobra.Command) (svc.Handle
 func (*storageV1) Init(ctx context.Context, cmd *cobra.Command) error {
 	return nil
 }
+
+func (*storageV1) Visible() bool {
+	return false
+}

pkg/integrations/storage_v2.go (new file, 75 lines)

@@ -0,0 +1,75 @@
//
// DISCLAIMER
//
// Copyright 2024 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//

package integrations

import (
	"context"

	"github.com/spf13/cobra"

	pbImplStorageV2 "github.com/arangodb/kube-arangodb/integrations/storage/v2"
	pbStorageV2 "github.com/arangodb/kube-arangodb/integrations/storage/v2/definition"
	awsHelper "github.com/arangodb/kube-arangodb/pkg/util/aws"
	"github.com/arangodb/kube-arangodb/pkg/util/errors"
	"github.com/arangodb/kube-arangodb/pkg/util/svc"
)

func init() {
	registerer.Register(pbStorageV2.Name, func() Integration {
		return &storageV2{}
	})
}

type storageV2 struct {
	Configuration pbImplStorageV2.Configuration
}

func (b *storageV2) Name() string {
	return pbStorageV2.Name
}

func (b *storageV2) Description() string {
	return "StorageBucket V2 Integration"
}

func (b *storageV2) Register(cmd *cobra.Command, fs FlagEnvHandler) error {
	return errors.Errors(
		fs.StringVar((*string)(&b.Configuration.Type), "type", string(pbImplStorageV2.ConfigurationTypeS3), "Type of the Storage Integration"),
		fs.StringVar(&b.Configuration.S3.Client.Endpoint, "s3.endpoint", "", "Endpoint of S3 API implementation"),
		fs.StringSliceVar(&b.Configuration.S3.Client.TLS.CAFiles, "s3.ca", nil, "Path to file containing CA certificate to validate endpoint connection"),
		fs.BoolVar(&b.Configuration.S3.Client.TLS.Insecure, "s3.allow-insecure", false, "If set to true, the Endpoint certificates won't be checked"),
		fs.BoolVar(&b.Configuration.S3.Client.DisableSSL, "s3.disable-ssl", false, "If set to true, the SSL won't be used when connecting to Endpoint"),
		fs.StringVar(&b.Configuration.S3.Client.Region, "s3.region", "", "Region"),
		fs.StringVar(&b.Configuration.S3.BucketName, "s3.bucket.name", "", "Bucket name"),
		fs.StringVar(&b.Configuration.S3.BucketPrefix, "s3.bucket.prefix", "", "Bucket Prefix"),
		fs.StringVar((*string)(&b.Configuration.S3.Client.Provider.Type), "s3.provider.type", string(awsHelper.ProviderTypeFile), "S3 Credentials Provider type"),
		fs.StringVar(&b.Configuration.S3.Client.Provider.File.AccessKeyIDFile, "s3.provider.file.access-key", "", "Path to file containing S3 AccessKey"),
		fs.StringVar(&b.Configuration.S3.Client.Provider.File.SecretAccessKeyFile, "s3.provider.file.secret-key", "", "Path to file containing S3 SecretKey"),
	)
}

func (b *storageV2) Handler(ctx context.Context, cmd *cobra.Command) (svc.Handler, error) {
	return pbImplStorageV2.New(b.Configuration)
}

func (*storageV2) Init(ctx context.Context, cmd *cobra.Command) error {
	return nil
}

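For orientation, a minimal sketch (not part of this commit) of building the same Configuration programmatically instead of through the CLI flags. The field paths mirror the Register calls above; the endpoint, region, bucket and secret file paths are purely illustrative, and the direct assignments of ConfigurationTypeS3 and ProviderTypeFile assume those constants carry the respective field types. It is assumed to sit next to storage_v2.go in package integrations.

// Sketch only: hand a pre-filled configuration to the storage/v2 service handler.
func newStorageV2Handler() (svc.Handler, error) {
	var cfg pbImplStorageV2.Configuration

	cfg.Type = pbImplStorageV2.ConfigurationTypeS3              // same default the "type" flag uses
	cfg.S3.Client.Endpoint = "https://s3.example.com"           // --...s3.endpoint
	cfg.S3.Client.Region = "eu-central-1"                       // --...s3.region
	cfg.S3.BucketName = "arangodb-platform"                     // --...s3.bucket.name
	cfg.S3.BucketPrefix = "tenant-a/"                           // --...s3.bucket.prefix
	cfg.S3.Client.Provider.Type = awsHelper.ProviderTypeFile    // --...s3.provider.type
	cfg.S3.Client.Provider.File.AccessKeyIDFile = "/secrets/credentials/accessKey"
	cfg.S3.Client.Provider.File.SecretAccessKeyFile = "/secrets/credentials/secretKey"

	return pbImplStorageV2.New(cfg)
}
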
@@ -75,7 +75,7 @@ func (f *fileProvider) Retrieve() (credentials.Value, error) {

 	f.recent = util.RecentFileModTime(f.accessKeyIDFile, f.secretAccessKeyFile, f.sessionTokenFile)

-	return credentials.Value{}, nil
+	return v, nil
 }

 func (f *fileProvider) IsExpired() bool {

@@ -28,10 +28,11 @@ const ProfilesDeployment = ProfileGroup + "/deployment"
 const ProfilesIntegrationPrefix = "integration." + ProfileGroup

 const (
-	ProfilesIntegrationAuthn = "authn"
-	ProfilesIntegrationAuthz = "authz"
-	ProfilesIntegrationSched = "sched"
-	ProfilesIntegrationEnvoy = "envoy"
+	ProfilesIntegrationAuthn   = "authn"
+	ProfilesIntegrationAuthz   = "authz"
+	ProfilesIntegrationSched   = "sched"
+	ProfilesIntegrationEnvoy   = "envoy"
+	ProfilesIntegrationStorage = "storage"
 )

 const (

@@ -26,6 +26,15 @@ import (
 	"reflect"
 )

+// NewPointer returns a reference to a copy of the pointer value
+func NewPointer[T any](input *T) *T {
+	if input == nil {
+		return nil
+	}
+
+	return NewType(*input)
+}
+
 // NewType returns a reference to a simple type with given value.
 func NewType[T interface{}](input T) *T {
 	return &input

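For orientation, a minimal standalone example (not part of this commit) contrasting the new NewPointer helper with the existing NewType: NewType copies a value and returns its address, NewPointer copies the value behind a pointer and is nil-safe.

package main

import (
	"fmt"

	"github.com/arangodb/kube-arangodb/pkg/util"
)

func main() {
	replicas := util.NewType[int32](10) // *int32 pointing at a fresh copy of 10

	copied := util.NewPointer(replicas) // new *int32 with the same value, different address
	var absent *int32

	fmt.Println(*replicas, *copied, util.NewPointer(absent) == nil) // 10 10 true
}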