chore: add pluto ci directory with default values as a starting point

Tommy 2024-11-14 20:08:45 +01:00
parent 1d71a10562
commit c7cc3d4b59
Signed by: tommy
SSH key fingerprint: SHA256:1LWgQT3QPHIT29plS8jjXc3S1FcE/4oGvsx3Efxs6Uc
19 changed files with 1963 additions and 0 deletions

View file

@@ -0,0 +1,32 @@
config:
  externalUrl: https://cache.example.com/
persistence:
  existingClaim: attic
initContainers:
  dbInit:
    image:
      repository: ghcr.io/onedr0p/postgres-init
      tag: "16"
    envFrom:
      - secretRef:
          name: attic-secret
envFromSecret: attic-secret
image:
  repository: ghcr.io/zhaofengli/attic
  tag: 4dbdbee45728d8ce5788db6461aaaa89d98081f0
postgres:
  secretName: attic-secret
resources:
  limits:
    memory: "3Gi"
    cpu: "1000m"
  # requests:
  #   cpu: 100m
  #   memory: 250Mi
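# NB; the dbInit container above follows the ghcr.io/onedr0p/postgres-init
# conventions, so attic-secret is assumed to carry INIT_POSTGRES_* keys.
# A sketch (key names are that image's convention, values are placeholders;
# verify against the image's documentation):
# INIT_POSTGRES_HOST: postgres-rw.databases.svc.cluster.local
# INIT_POSTGRES_DBNAME: attic
# INIT_POSTGRES_USER: attic
# INIT_POSTGRES_PASS: <generated password>
# INIT_POSTGRES_SUPER_PASS: <postgres superuser password>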

View file

@@ -0,0 +1,39 @@
config:
  baseUrl: https://infrastructure.252.no
image:
  repository: index.docker.io/lyft/clutch
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  tag: "latest"
existingSecretName: clutch-config-secret
initContainers:
  dbInit:
    image:
      repository: ghcr.io/onedr0p/postgres-init
      tag: "16"
    existingSecretName: clutch-dbinit-secret
prometheus:
  podmonitor:
    enabled: true
resources:
  limits:
    cpu: 500m
    memory: 768Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
podLabels: {}
podAnnotations: {}

View file

@@ -0,0 +1,104 @@
# Default values for conduit.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
  repository: registry.gitlab.com/famedly/conduit/matrix-conduit
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  tag: ""
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""
conduit:
  server_name: "your.server.name"
  allowRegistration: false
  allowEncryption: true
  allowFederation: true
  allowRoomCreation: true
  allowUnstableRoomVersions: true
  trustedServers:
    - "matrix.org"
  maxRequestSize: "20000000" # in bytes, ~20 MB
  # maxConcurrentRequests: 100
  maxConcurrentRequests:
  # log: "warn,rocket=off,_=off,sled=off"
  log:
  registrationToken:
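  # Example (a sketch): to run closed registration behind a shared token, the
  # corresponding keys above would be set along these lines (the token value
  # is a placeholder):
  # allowRegistration: true
  # registrationToken: "some-long-random-string"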
podLabels: {}
podAnnotations: {}
securityContext: {}
  # capabilities:
  #   drop:
  #     - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000
service:
  type: ClusterIP
  port: 6167
resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi
autoscaling:
  enabled: false
  minReplicas: 1
  maxReplicas: 100
  targetCPUUtilizationPercentage: 80
  # targetMemoryUtilizationPercentage: 80
nodeSelector: {}
tolerations: []
affinity: {}
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
  enabled: true
  annotations: {}
  ## Persistent Volume Storage Class
  ## If defined, storageClassName: <storageClass>
  ## If set to "-", storageClassName: "", which disables dynamic provisioning
  ## If undefined (the default) or set to null, no storageClassName spec is
  ## set, choosing the default provisioner. (gp2 on AWS, standard on
  ## GKE, AWS & OpenStack)
  ##
  # storageClass: "-"
  ## A manually managed Persistent Volume and Claim
  ## Requires persistence.enabled: true
  ## If defined, PVC must be created manually before volume will be bound
  # existingClaim:
  accessMode: ReadWriteOnce
  size: 1Gi

View file

@@ -0,0 +1,42 @@
envFromSecret: dispatch-secret
image:
  registry: code.252.no
  repository: elk-works/dispatch
  tag: v20240731@sha256:e6c916d8bb6e02e7e23473229f3aa7643bcd00de5f59553794573a4a4737a635
  pullPolicy: IfNotPresent
  pullSecret: ""
initContainers:
  dbInit:
    image:
      repository: ghcr.io/onedr0p/postgres-init
      tag: "16"
    envFrom:
      - secretRef:
          name: dispatch-secret
url: http://localhost:80
authentication:
  provider_slug: dispatch-auth-provider-header
  header:
    header_name: Tailscale-User-Login
core:
  enabled: true
web:
  enabled: true
service:
  type: ClusterIP
  port: 80
scheduler:
  enabled: true
postgres:
  hostname: "postgres-rw.databases.svc.cluster.local"
  database_name: "dispatch"
  port: "5432"
  secretName: dispatch-secret

View file

@@ -0,0 +1,78 @@
# Default values for element-call.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
  repository: ghcr.io/element-hq/element-call
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  tag: ""
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""
podAnnotations: {}
podSecurityContext: {}
  # fsGroup: 2000
securityContext: {}
  # capabilities:
  #   drop:
  #     - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000
service:
  type: ClusterIP
  port: 8080
resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi
autoscaling:
  enabled: false
  minReplicas: 1
  maxReplicas: 100
  targetCPUUtilizationPercentage: 80
  # targetMemoryUtilizationPercentage: 80
nodeSelector: {}
tolerations: []
affinity: {}
config:
  homeserver:
    base_url: http://localhost:8008
    server_name: localhost
  livekit:
    livekit_service_url: https://localhost/
  posthog:
    api_key:
    api_host: https://localhost
  rageshake:
    submit_url:
  eula_url:
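  # Example (a sketch) of pointing the app at real services instead of the
  # localhost placeholders above; all hostnames here are hypothetical:
  # homeserver:
  #   base_url: https://synapse.example.com
  #   server_name: example.com
  # livekit:
  #   livekit_service_url: https://livekit-jwt.example.com/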

View file

View file

@@ -0,0 +1,77 @@
initContainers:
  dbInit:
    image:
      repository: mariadb
      tag: "11.5.2"
mysql:
  host: mariadb-dfir.databases.svc.cluster.local
  port: 3306
  existingSecretName: grr-mysql-init-secret
  fleetspeak:
    database: fleetspeak
  grr:
    database: grr
fleetspeak:
  image:
    repository: ghcr.io/google/fleetspeak
    tag: latest
  httpsHeaderChecksum: false
  subjectCommonName: "fleetspeak-frontend"
  admin:
    replicas: 1
    url: grr-fleetspeak-admin.sec-forensics.svc.cluster.local
    resources:
      limits:
        cpu: 500m
        memory: 1Gi
      requests:
        cpu: 50m
        memory: 128Mi
  frontend:
    replicas: 1
    url: grr-fleetspeak-frontend.sec-forensics.svc.cluster.local
    resources:
      limits:
        cpu: 500m
        memory: 1Gi
      requests:
        cpu: 50m
        memory: 128Mi
grr:
  image:
    repository: "ghcr.io/google/grr"
    tag: latest
  subjectCommonName: grr.252.no
  admin:
    replicas: 1
    resources:
      limits:
        cpu: 500m
        memory: 1Gi
      requests:
        cpu: 50m
        memory: 128Mi
  frontend:
    resources:
      limits:
        cpu: 500m
        memory: 1Gi
      requests:
        cpu: 50m
        memory: 128Mi
  worker:
    resources:
      limits:
        cpu: 500m
        memory: 1Gi
      requests:
        cpu: 50m
        memory: 128Mi
prometheus:
  metricsPort: 9001

View file

View file

@@ -0,0 +1,49 @@
image:
  repository: sissbruecker/linkding
  pullPolicy: IfNotPresent
initContainers:
  dbInit:
    image:
      repository: ghcr.io/onedr0p/postgres-init
      tag: "16"
    envFrom:
      - secretRef:
          name: linkding-secret
env:
  - name: TZ
    value: Europe/Oslo
envFromSecret: linkding-env-secret
postgres:
  secretName: linkding-secret
persistence:
  existingClaim: linkding
securityContext: {}
resources: {}
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi
affinity: {}
  # nodeAffinity:
  #   requiredDuringSchedulingIgnoredDuringExecution:
  #     nodeSelectorTerms:
  #       - matchExpressions:
  #           - key: kubernetes.io/hostname
  #             operator: In
  #             values:
  #               - my-node-xyz
tolerations: []
  # - key: node-role.kubernetes.io/control-plane
  #   operator: Exists
  #   effect: NoSchedule

View file

@@ -0,0 +1,63 @@
nameOverride: ""
fullnameOverride: ""
existingSecretConfigMap: matrix-authentication-service-secret
postgres:
  secretName: matrix-authentication-service-secret
initContainers:
  dbInit:
    image:
      repository: ghcr.io/onedr0p/postgres-init
      tag: "16"
    envFrom:
      - secretRef:
          name: matrix-authentication-service-secret
image:
  registry: ghcr.io
  repository: element-hq/matrix-authentication-service
  pullPolicy: IfNotPresent
  # -- Overrides the image tag whose default is the chart appVersion.
  tag:
replicaCount: 1
podAnnotations: {}
podLabels: {}
podSecurityContext:
  fsGroup: 1000
securityContext:
  capabilities:
    drop:
      - ALL
  readOnlyRootFilesystem: true
  runAsNonRoot: true
  runAsUser: 1000
livenessProbe:
  httpGet:
    port: internal
    path: /health
readinessProbe:
  httpGet:
    port: internal
    path: /health
prometheus:
  servicemonitor:
    enabled: true
    labels: {}
resources: {}
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

View file

@@ -0,0 +1,974 @@
initContainers:
  dbInit:
    image:
      repository: ghcr.io/onedr0p/postgres-init
      tag: "16"
    envFrom:
      - secretRef:
          name: synapse-secret
prometheus:
  podmonitor:
    enabled: true
image:
  repository: ghcr.io/element-hq/synapse
  pullPolicy: IfNotPresent
  ## Optionally specify an array of imagePullSecrets.
  ## Secrets must be manually created in the namespace.
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  ##
  # pullSecrets:
  #   - myRegistryKeySecretName
## Override part of the installed name, will still keep release name.
##
# nameOverride: ""
## Override the full name of the installed chart.
##
# fullnameOverride: ""
## The Matrix domain name, this is what will be used for the domain part in
## your MXIDs.
##
# serverName: 'example.com'
## The public Matrix server name, this will be used for any public URLs
## in config as well as for client API links in the ingress.
# publicServerName: 'matrix.example.com'
## The source of the signing key used by Synapse in federation.
##
signingkey:
  ## Enable a Kubernetes job to generate and store a signing key if one does not
  ## exist.
  ## If you have already run a Matrix server at some point on your domain then
  ## you will want to keep the old signing key, either by using the `existingSecret`
  ## configuration, or by including the old key under `extraConfig.old_signing_keys`
  ##
  ## If you lose your signing key then any federation traffic from your instance
  ## might not be trusted any more by the wider network.
  ##
  job:
    enabled: true
    ## Annotations to apply to the signing-key-job.
    ##
    annotations: {}
      # argocd.argoproj.io/hook: PostSync
      # argocd.argoproj.io/hook-delete-policy: HookSucceeded
    generateImage:
      repository: matrixdotorg/synapse
      # tag: latest
      pullPolicy: IfNotPresent
    publishImage:
      repository: bitnami/kubectl
      # tag: latest
      pullPolicy: IfNotPresent
  ## Specify an existing signing key secret, will need to be created in advance.
  ##
  # existingSecret: secret-name
  # existingSecretKey: signing.key
  ## Resources to apply to the signing key generation job
  ##
  resources: {}
    # limits:
    #   cpu: 100m
    #   memory: 250Mi
    # requests:
    #   cpu: 100m
    #   memory: 250Mi
## Matrix configuration values that affect other parts of the chart, for any
## value not handled by this block, you will want to instead set it in
## extraConfig below.
## Ref: https://github.com/matrix-org/synapse/blob/develop/docs/sample_config.yaml
##
config:
  ## The publicly accessible URL for the Synapse instance, will default to
  ## https://<publicServerName>.
  ##
  # publicBaseurl: 'https://matrix.example.com'
  ## The log level for Synapse and all modules.
  ##
  # logLevel: INFO
  ## Should usage stats be reported
  ##
  reportStats: false
  ## Protect registration with recaptcha. (optional)
  ##
  # recaptcha:
  #   publicKey: ''
  #   privateKey: ''
  ## URIs and secret key for TURN servers to use to help establish 1:1 WebRTC
  ## calls.
  ##
  # turnUris: []
  # turnSecret: ''
  ## Registration configuration, note that registration with the
  ## container-internal register_new_matrix_user tool is always possible.
  ##
  # enableRegistration: false
  ## NB; this value will default to a random string if not specified.
  # registrationSharedSecret: ''
  ## NB; Strongly recommended to set this to a secure value.
  # macaroonSecretKey: ''
  ## A set of trusted servers to contact if another server doesn't respond to a
  ## signing key request.
  ##
  trustedKeyServers:
    - server_name: matrix.org
    #   verify_keys:
    #     "ed25519:auto": "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
  ## The bind addresses to use for the default listeners
  ##
  # bindAddresses:
  #   - '::'
  ## Extra listeners to configure.
  ##
  extraListeners: []
    # - port: 9000
    #   bind_addresses: ['::']
    #   type: manhole
  ## Extra listener types to add onto the main listener.
  ##
  extraMainListenerTypes: []
    # - consent
  ## Logging
  # use TerseJsonFormatter structured logging
  # Ref: https://matrix-org.github.io/synapse/latest/structured_logging.html
  useStructuredLogging: false
## Specify arbitrary Synapse configuration here;
## Ref: https://github.com/matrix-org/synapse/blob/develop/docs/sample_config.yaml
##
extraConfig: {}
  # old_signing_keys:
  #   "ed25519:id": { key: "base64string", expired_ts: 123456789123 }
  # use_presence: false
  # enable_search: false
  # federation_domain_whitelist:
  #   - lon.example.com
  #   - nyc.example.com
  #   - syd.example.com
  # dynamic_thumbnails: true
## Specify additional loggers configuration here;
## Ref: https://matrix-org.github.io/synapse/latest/structured_logging.html
extraLoggers: {}
  # synapse.storage.SQL:
  #   level: WARNING
## Specify arbitrary - secret - Synapse configuration here;
## These values will be stored in secrets instead of configmaps
## Ref: https://github.com/matrix-org/synapse/blob/develop/docs/sample_config.yaml
##
extraSecrets: {}
  # password_config:
  #   pepper: ''
## Configuration to apply to the main Synapse pod.
##
synapse:
  ## Only really applicable when the deployment has an RWO PV attached (e.g. when media repository
  ## is enabled for the main Synapse pod)
  ## Since replicas = 1, an update can get "stuck", as the previous pod remains attached to the
  ## PV, and the "incoming" pod can never start. Changing the strategy to "Recreate" will
  ## terminate the single previous pod, so that the new, incoming pod can attach to the PV
  ##
  strategy:
    type: RollingUpdate
  ## Annotations to apply to the main Synapse pod.
  ##
  annotations: {}
    # prometheus.io/scrape: "true"
    # prometheus.io/path: "/_synapse/metrics"
    # prometheus.io/port: "9090"
  ## Labels to apply to the main Synapse pod.
  ##
  labels: {}
  ## Additional environment variables to apply to the main Synapse pod
  ##
  extraEnv: []
    # - name: LD_PRELOAD
    #   value: /usr/lib/x86_64-linux-gnu/libjemalloc.so.2
    # - name: SYNAPSE_CACHE_FACTOR
    #   value: "2"
  ## Additional volumes to mount into Synapse
  ##
  extraVolumes: []
    # - name: spamcheck
    #   flexVolume:
    #     driver: ananace/git-live
    #     options:
    #       repo: https://github.com/company/synapse-module
    #       interval: 1d
    #     readOnly: true
  extraVolumeMounts: []
    # - name: spamcheck
    #   mountPath: /usr/local/lib/python3.7/site-packages/company
  ## Extra commands to run when starting Synapse
  ##
  extraCommands: []
    # - 'apt-get update -yqq && apt-get install patch -yqq'
    # - 'patch -d/usr/local/lib/python3.7/site-packages/synapse -p2 < /synapse/patches/something.patch'
  ## Configuration for the pod security policy, Synapse will always run as
  ## its own user, even if not set.
  ## Note that changing this may also require you to use the volumePermission
  ## helper depending on your storage.
  ##
  ## NB; The synapse install is currently unable to run as anything but UID:GID
  ## 666:666.
  ##
  podSecurityContext: {}
    # fsGroup: 666
    # runAsGroup: 666
    # runAsUser: 666
  ## Configuration for the container security policy, refer to the above
  ## podSecurityContext for more relevant information.
  ##
  securityContext: {}
    # capabilities:
    #   drop:
    #     - ALL
    # readOnlyRootFilesystem: true
    # runAsNonRoot: true
    # runAsUser: 666
  ## Resources to apply to the main Synapse pod.
  ##
  resources: {}
    # limits:
    #   cpu: 1000m
    #   memory: 2500Mi
    # requests:
    #   cpu: 1000m
    #   memory: 2500Mi
  ## Liveness probe configuration to use
  ##
  livenessProbe:
    httpGet:
      path: /health
      port: http
  ## Readiness probe configuration to use
  ##
  readinessProbe:
    httpGet:
      path: /health
      port: http
  ## Startup probe configuration to use
  ##
  startupProbe:
    failureThreshold: 12
    httpGet:
      path: /health
      port: http
  ## Node selectors to set for the main Synapse pod.
  ##
  nodeSelector: {}
  ## Tolerations to set for the main Synapse pod.
  ##
  tolerations: []
  ## Affinity to set for the main Synapse pod.
  ##
  affinity: {}
## Configuration for handling Synapse workers, which are useful for handling
## high-load deployments.
##
## More information is available at;
## https://github.com/matrix-org/synapse/blob/master/docs/workers.md
##
workers:
  ## Default configuration, this is inherited into all workers, and can also be
  ## overridden on each worker type.
  ##
  default:
    ## The number of worker replicas, note that some workers require special
    ## handling. Refer to the information URL above.
    ##
    replicaCount: 1
    ## Update strategy - only really applicable for deployments with RWO PVs attached (e.g. media repository)
    ## If replicas = 1, an update can get "stuck", as the previous pod remains attached to the
    ## PV, and the "incoming" pod can never start. Changing the strategy to "Recreate" will
    ## terminate the single previous pod, so that the new, incoming pod can attach to the PV
    ##
    strategy:
      type: RollingUpdate
    ## A specific name for this worker, can't be set globally.
    ## Note that this can only be set when replicaCount is 1
    # name:
    ## Additional configuration to set for the worker, can't be set globally.
    # extraConfig: {}
    ## Annotations to apply to the worker.
    ##
    annotations: {}
      # prometheus.io/scrape: "true"
      # prometheus.io/path: /_synapse/metrics
      # prometheus.io/port: 9090
    ## Additional environment variables to add to the worker.
    ##
    extraEnv: []
      # - name: LD_PRELOAD
      #   value: /usr/lib/x86_64-linux-gnu/libjemalloc.so.2
      # - name: SYNAPSE_CACHE_FACTOR
      #   value: "1.0"
    ## Additional volumes to add to the worker.
    ## Useful for the media repo, or for adding Python modules.
    ##
    volumes: []
    volumeMounts: []
    ## Extra commands to run when starting Synapse
    ##
    extraCommands: []
      # - 'apt-get update -yqq && apt-get install patch -yqq'
      # - 'patch -d/usr/local/lib/python3.7/site-packages/synapse -p2 < /synapse/patches/something.patch'
    ## Security context information to set to the worker.
    ##
    podSecurityContext: {}
      # fsGroup: 666
      # runAsGroup: 666
      # runAsUser: 666
    ## Container security context information to set to the worker.
    ##
    securityContext: {}
      # capabilities:
      #   drop:
      #     - ALL
      # readOnlyRootFilesystem: true
      # runAsNonRoot: true
      # runAsUser: 666
    ## Resources to apply to the worker.
    ##
    resources: {}
      # limits:
      #   cpu: 100m
      #   memory: 128Mi
      # requests:
      #   cpu: 100m
      #   memory: 128Mi
    ## Liveness probe configuration to use
    ##
    livenessProbe:
      httpGet:
        path: /health
        port: metrics
    ## Readiness probe configuration to use
    ##
    readinessProbe:
      httpGet:
        path: /health
        port: metrics
    ## Startup probe configuration to use
    ## Defaults to allowing workers 60 seconds to start up
    ##
    startupProbe:
      failureThreshold: 6
      httpGet:
        path: /health
        port: metrics
    ## Node selector configuration to set on the worker.
    ##
    nodeSelector: {}
    ## Toleration configuration to set on the worker.
    ##
    tolerations: []
    ## Affinity configuration to set on the worker.
    ##
    affinity: {}
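  ## Example (a sketch): any key from `default` above can be repeated under a
  ## specific worker type to override it there, e.g. for the generic worker
  ## defined below:
  # generic_worker:
  #   replicaCount: 2
  #   resources:
  #     limits:
  #       memory: 512Mi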
  ## The generic worker can be used to handle most endpoints.
  ## Be careful when enabling the sync endpoints as they can eat large amounts of
  ## resources. Refer to the information URL above for more info.
  ## Proper load balancing with the K8s Ingress resource may not be possible.
  ##
  generic_worker:
    enabled: false
    generic: true
    listeners: [ client, federation ]
    csPaths:
      ## Sync requests
      # - "/_matrix/client/(r0|v3)/sync$"
      - "/_matrix/client/(api/v1|r0|v3)/events$"
      # - "/_matrix/client/(api/v1|r0|v3)/initialSync$"
      # - "/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync$"
      ## Client API requests
      - "/_matrix/client/(api/v1|r0|v3|unstable)/createRoom$"
      - "/_matrix/client/(api/v1|r0|v3|unstable)/publicRooms$"
      - "/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/joined_members$"
      - "/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/context/"
      - "/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/members$"
      - "/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state$"
      - "/_matrix/client/v1/rooms/.*/hierarchy$"
      - "/_matrix/client/unstable/org.matrix.msc2716/rooms/.*/batch_send$"
      - "/_matrix/client/unstable/im.nheko.summary/rooms/.*/summary$"
      - "/_matrix/client/(r0|v3|unstable)/account/3pid$"
      - "/_matrix/client/(r0|v3|unstable)/account/whoami$"
      - "/_matrix/client/(r0|v3|unstable)/devices$"
      - "/_matrix/client/versions$"
      - "/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$"
      - "/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event/"
      - "/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms$"
      - "/_matrix/client/(api/v1|r0|v3|unstable)/search$"
      ## Encryption requests
      - "/_matrix/client/(r0|v3|unstable)/keys/query$"
      - "/_matrix/client/(r0|v3|unstable)/keys/changes$"
      - "/_matrix/client/(r0|v3|unstable)/keys/claim$"
      - "/_matrix/client/(r0|v3|unstable)/room_keys/"
      ## Registration/login requests
      - "/_matrix/client/(api/v1|r0|v3|unstable)/login$"
      - "/_matrix/client/(r0|v3|unstable)/register$"
      - "/_matrix/client/v1/register/m.login.registration_token/validity$"
      ## Event sending requests
      - "/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/redact"
      - "/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/send"
      - "/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state/"
      - "/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/(join|invite|leave|ban|\
        unban|kick)$"
      - "/_matrix/client/(api/v1|r0|v3|unstable)/join/"
      - "/_matrix/client/(api/v1|r0|v3|unstable)/profile/"
      ## User directory search requests
      - "/_matrix/client/(r0|v3|unstable)/user_directory/search"
      ## Worker event streams
      ## See https://matrix-org.github.io/synapse/latest/workers.html#stream-writers
      ##
      ## The typing event stream
      # - "/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/typing"
      ## The to_device event stream
      # - "/_matrix/client/(r0|v3|unstable)/sendToDevice/"
      ## The account_data event stream
      # - "/_matrix/client/(r0|v3|unstable)/.*/tags"
      # - "/_matrix/client/(r0|v3|unstable)/.*/account_data"
      ## The receipts event stream
      # - "/_matrix/client/(r0|v3|unstable)/rooms/.*/receipt"
      # - "/_matrix/client/(r0|v3|unstable)/rooms/.*/read_markers"
      ## The presence event stream
      # - "/_matrix/client/(api/v1|r0|v3|unstable)/presence/"
    paths:
      ## Federation requests
      - "/_matrix/federation/v1/event/"
      - "/_matrix/federation/v1/state/"
      - "/_matrix/federation/v1/state_ids/"
      - "/_matrix/federation/v1/backfill/"
      - "/_matrix/federation/v1/get_missing_events/"
      - "/_matrix/federation/v1/publicRooms"
      - "/_matrix/federation/v1/query/"
      - "/_matrix/federation/v1/make_join/"
      - "/_matrix/federation/v1/make_leave/"
      - "/_matrix/federation/(v1|v2)/send_join/"
      - "/_matrix/federation/(v1|v2)/send_leave/"
      - "/_matrix/federation/(v1|v2)/invite/"
      - "/_matrix/federation/v1/event_auth/"
      - "/_matrix/federation/v1/exchange_third_party_invite/"
      - "/_matrix/federation/v1/user/devices/"
      - "/_matrix/key/v2/query"
      - "/_matrix/federation/v1/hierarchy/"
      ## Inbound federation transaction request
      - "/_matrix/federation/v1/send/"
  ## To separate the generic worker into specific concerns - for example federation transaction receiving;
  ## NB; This worker should have incoming traffic routed based on source IP, which is
  ## left as an exercise to the reader.
  ## https://github.com/matrix-org/synapse/blob/develop/docs/workers.md#load-balancing
  # federation_reader:
  #   enabled: true
  #   generic: true
  #   listeners: [federation]
  #   paths:
  #     - "/_matrix/federation/v1/send/"
  ## Or /sync handling.
  ## NB; Care should be taken to route users to the same instance when scaling this worker,
  ## this is left as an exercise to the reader.
  ## https://github.com/matrix-org/synapse/blob/develop/docs/workers.md#load-balancing
  # synchrotron:
  #   enabled: true
  #   generic: true
  #   listeners: [client]
  #   csPaths:
  #     - "/_matrix/client/(v2_alpha|r0|v3)/sync$"
  #     - "/_matrix/client/(api/v1|v2_alpha|r0|v3)/events$"
  #     - "/_matrix/client/(api/v1|r0|v3)/initialSync$"
  #     - "/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync$"
  ## Specialized - non-generic workers below;
  ## This worker deals with pushing notifications.
  ## NB; Only one instance of this worker can be run at a time, refer to the
  ## information URL above.
  ##
  pusher:
    enabled: false
  ## This worker handles sending data to registered appservices.
  ## NB; Only one instance of this worker can be run at a time, refer to the
  ## information URL above.
  ##
  appservice:
    enabled: false
    generic: true
    name: appservices
  ## This worker handles sending federation traffic to other Synapse servers.
  ##
  federation_sender:
    enabled: false
  ## Specialized workers can also be run as multiple separate instances,
  ## make sure to read the relevant documentation.
  ##
  # federation_sender_other:
  #   app: federation_sender
  #   enabled: false
  ## This worker deals with serving and storing media.
  ## NB; Running multiple instances will conflict with background jobs.
  ##
  media_repository:
    enabled: false
    listeners: [ media ]
    csPaths:
      - "/_matrix/media/.*"
      - "/_synapse/admin/v1/purge_media_cache$"
      - "/_synapse/admin/v1/room/.*/media"
      - "/_synapse/admin/v1/user/.*/media"
      - "/_synapse/admin/v1/media/"
      - "/_synapse/admin/v1/quarantine_media/"
      - "/_synapse/admin/v1/users/.*/media$"
    paths:
      - "/_matrix/media/.*"
  ## This worker deals with user directory searches.
  ##
  user_dir:
    enabled: false
    name: userdir
    listeners: [ client ]
    csPaths:
      - "/_matrix/client/(api/v1|r0|v3|unstable)/user_directory/search$"
  ## This worker handles key uploads, and may also stub out presence if that is
  ## disabled. If you set extraConfig.use_presence=false then you may want to
  ## uncomment the second path.
  ##
  frontend_proxy:
    enabled: false
    listeners: [ client ]
    csPaths:
      - "/_matrix/client/(api/v1|r0|v3|unstable)/keys/upload"
      # - "/_matrix/client/(api/v1|r0|v3|unstable)/presence/[^/]+/status"
## This will set up a Lighttpd server to respond to any
## /.well-known/matrix/server requests, to make federation possible without
## adding SRV-records to DNS.
##
wellknown:
  enabled: false
  replicaCount: 1
  # Lighttpd does not bind on IPv6 by default, although this is required in
  # IPv6-only clusters.
  useIpv6: false
  ## The host and port combo to serve on .well-known/matrix/server.
  ##
  server: {}
    # m.server: matrix.example.com:443
  ## Data to serve on .well-known/matrix/client.
  ##
  client: {}
    # m.homeserver:
    #   base_url: https://matrix.example.com
  ## Configuration for the wellknown service.
  ##
  service:
    type: ClusterIP
    port: 80
  ## Extra data objects to serve under .well-known/matrix/<data>
  ## Dictionaries will be JSON converted, plain strings will be served as they are
  ##
  extraData: {}
    ## MSC1929 example;
    # support:
    #   admins:
    #     - matrix_id: '@admin:example.com'
    #       email_address: 'admin@example.com'
    #       role: 'admin'
    #   support_page: 'https://example.com/support'
  ## A custom htdocs path, useful when running another image.
  ##
  htdocsPath: /var/www/localhost/htdocs
  ## The lighttpd image to run.
  ##
  image:
    repository: ghcr.io/rtsp/docker-lighttpd
    tag: latest
    pullPolicy: Always
  ## Optionally specify an array of imagePullSecrets.
  ## Secrets must be manually created in the namespace.
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  ##
  # pullSecrets:
  #   - myRegistryKeySecretName
  ## Configuration for the pod security policy.
  ##
  podSecurityContext: {}
    # fsGroup: 101
    # runAsGroup: 101
    # runAsUser: 100
  ## Configuration for the container security policy.
  ##
  securityContext: {}
    # capabilities:
    #   drop:
    #     - ALL
    # readOnlyRootFilesystem: true
    # runAsNonRoot: true
    # runAsUser: 100
  ## Resource configuration to apply to the well-known server.
  ##
  resources: {}
    # limits:
    #   cpu: 5m
    #   memory: 15Mi
    # requests:
    #   cpu: 5m
    #   memory: 15Mi
  ## Node selectors to set for the well-known server.
  ##
  nodeSelector: {}
  ## Tolerations to set for the well-known server.
  ##
  tolerations: []
  ## Affinity to set for the main well-known server.
  ##
  affinity: {}
## This configuration is for setting up the internally provided Postgres server,
## if you instead want to use an existing server, then you may want to set
## enabled to false and configure the externalPostgresql block.
##
postgresql:
  enabled: true
  auth:
    # XXX Change me!
    password: synapse
    ## Or use existing secret with "password" key
    ## instead of static password
    ##
    # existingSecret: postgresql-secret
    username: synapse
    database: synapse
  primary:
    initdb:
      args: "--lc-collate=C --lc-ctype=C"
    persistence:
      # storageClass: "-"
      size: 16Gi
  ## Extra arguments for the database connection
  ## ref: https://github.com/matrix-org/synapse/blob/develop/docs/postgres.md#synapse-config
  ##
  extraArgs: {}
## An externally configured Postgres server to use for Synapse's database, note
## that the database needs to have both COLLATE and CTYPE set to "C".
##
externalPostgresql:
  # host: postgres
  port: 5432
  username: synapse
  # password: synapse
  ## The name of an existing secret with postgresql credentials
  # existingSecret: postgres-secrets
  ## Password key to be retrieved from existing secret
  # existingSecretPasswordKey: postgres-password
  database: synapse
  # sslmode: prefer
  ## Extra arguments for the database connection
  ## ref: https://github.com/matrix-org/synapse/blob/develop/docs/postgres.md#synapse-config
  ##
  extraArgs: {}
## This configuration is for the internal Redis that's deployed for use with
## workers/sharding, for an external Redis server you want to set enabled to
## false and configure the externalRedis block.
##
redis:
  enabled: true
  ## Database ID for non-default database
  # dbid: 0
  auth:
    enabled: true
    # XXX Change me!
    password: synapse
    ## Or use existing secret with "redis-password" key
    ## instead of static password
    ##
    # existingSecret: redis-secret
  architecture: standalone
  master:
    kind: Deployment
    persistence:
      ## Note that Synapse only uses redis as a synchronization utility, so no
      ## data will ever need to be persisted.
      ##
      enabled: false
    service:
      port: 6379
## An externally configured Redis server to use for workers/sharding.
##
externalRedis:
  # host: redis
  port: 6379
  # password: synapse
  ## Database ID for non-default database
  # dbid: 0
  ## The name of an existing secret with redis credentials
  # existingSecret: redis-secrets
  ## Password key to be retrieved from existing secret
  # existingSecretPasswordKey: redis-password
## Persistence configuration for the media repository function.
## This PVC will be mounted in either Synapse or a media_repo worker.
##
## NB; If you want to be able to scale this, you will have to set the
## accessMode to RWX/ReadWriteMany.
##
persistence:
  enabled: true
  # existingClaim: synapse-data
  # storageClass: "-"
  accessMode: ReadWriteOnce
  size: 10Gi
## Set up an init container to chown the mounted media if necessary.
##
volumePermissions:
  enabled: false
  uid: 666
  gid: 666
  image:
    repository: alpine
    tag: latest
    pullPolicy: Always
  ## Optionally specify an array of imagePullSecrets.
  ## Secrets must be manually created in the namespace.
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  ##
  # pullSecrets:
  #   - myRegistryKeySecretName
  resources: {}
    # resources:
    #   requests:
    #     memory: 128Mi
    #     cpu: 100m
## Configuration for the main Synapse service.
## To use TLS inside Synapse itself, add a TLS listener, and change the ports
## configured in here.
##
service:
  type: ClusterIP
  port: 8008
  targetPort: http
## The K8s ingress configuration, this will be quite heavily used in order to
## set up all routing necessary for use with a sharded Synapse instance.
## If you're not using an Ingress-compatible K8s ingress, you will need to set up
## your own routing instead.
##
ingress:
  enabled: true
  ## Generate traefik-compatible regex paths instead of nginx-compatible ones.
  ##
  traefikPaths: false
  ## Annotations to apply to the created ingress resource.
  ##
  annotations: {}
    # nginx.ingress.kubernetes.io/use-regex: "true"
    # # Sync proxy-body-size with Synapse's max_upload_size which is 10M by default
    # nginx.ingress.kubernetes.io/proxy-body-size: 10m
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  ## Hosts to add to the ingress configuration for handling Client-to-Server
  ## API request paths.
  ##
  ## NB; config.serverName is included if includeServerName is set. (default)
  ##
  csHosts: []
    # - matrix.example.com
  ## Additional hosts to add to the ingress configuration for handling
  ## Server-to-Server API requests.
  ##
  ## NB; config.serverName is included if includeServerName is set. (default)
  ##
  hosts: []
    # - example.com
  ## Additional hosts to add to the ingress configuration for handling
  ## well-known requests.
  ##
  ## NB; config.serverName is included if includeServerName is set. (default)
  ##
  wkHosts: []
    # - example.com
  ## Additional paths to add to the Server-to-Server ingress blocks, will be
  ## inserted before the /_matrix catch-all path.
  ##
  paths: []
    # # K8s 1.19+
    # - path: /_matrix/media
    #   pathType: Prefix
    #   backend:
    #     service:
    #       name: matrix-media-repo
    #       port: 8000
    # # K8s <1.19
    # - path: /_matrix/media
    #   backend:
    #     serviceName: matrix-media-repo
    #     servicePort: 8000
  ## Additional paths to add to the Client-to-Server ingress blocks, will be
  ## inserted before the /_matrix and /_synapse catch-all paths.
  ##
  csPaths: []
    # # K8s 1.19+
    # - path: /_matrix/media
    #   pathType: Prefix
    #   backend:
    #     service:
    #       name: matrix-media-repo
    #       port:
    #         number: 8000
    # # K8s <1.19
    # - path: /_matrix/media
    #   backend:
    #     serviceName: matrix-media-repo
    #     servicePort: 8000
  ## Should the /_synapse path be included in the ingress, admin APIs are
  ## provided under this path.
  ##
  includeUnderscoreSynapse: true
  ## Should config.serverName be included in the list of ingress paths, can be
  ## set to false if the main domain is managed in some external way.
  ##
  includeServerName: true
  ## TLS configuration to include in the ingress configuration
  ##
  tls: []
    # - secretName: chart-example-tls
    #   hosts:
    #     - example.com
    #     - matrix.example.com
  ## Set the name of the IngressClass cluster resource (optional)
  ## https://kubernetes.io/docs/reference/kubernetes-api/service-resources/ingress-v1/#IngressSpec
  # className: can-be-anything
## Specifies whether a service account should be created, and annotations to add.
##
serviceAccount:
  create: false
  annotations: {}
    # eks.amazonaws.com/role-arn: arn:aws:iam::000000000000:role/matrix-synapse
  # name: non-default-service-name
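## A minimal sketch of the values typically overridden per deployment, using
## the knobs documented above (all hostnames are placeholders):
# serverName: example.com
# publicServerName: matrix.example.com
# ingress:
#   tls:
#     - secretName: matrix-tls
#       hosts:
#         - matrix.example.com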

View file

@@ -0,0 +1,30 @@
existingSecretConfigMap: maubot-secret
persistence:
  existingClaim: maubot
initContainers:
  dbInit:
    image:
      repository: ghcr.io/onedr0p/postgres-init
      tag: "16"
    envFrom:
      - secretRef:
          name: maubot-secret
envFromSecret: maubot-secret
image:
  repository: dock.mau.dev/maubot/maubot
  tag: v0.5.0
postgres:
  secretName: maubot-secret
resources:
  limits:
    memory: "2Gi"
    cpu: "1000m"
  requests:
    cpu: 100m
    memory: 50Mi

View file

@@ -0,0 +1,136 @@
replicaCount: 1
initContainers:
  dbInit:
    image:
      repository: ghcr.io/onedr0p/postgres-init
      tag: "16"
    envFrom:
      - secretRef:
          name: netbox-secret
image:
  repository: quay.io/netboxcommunity/netbox
  tag: v4.1.6
  pullPolicy: Always
nameOverride: ""
fullnameOverride: ""
service:
  type: ClusterIP
  port: 80
existingSecret: netbox-secret
extraEnv: {}
  # EMAIL_SERVER: localhost
  # EMAIL_PORT: 25
  # EMAIL_USERNAME: netbox
  # EMAIL_TIMEOUT: 5
  # EMAIL_FROM: netbox@example.com
  # MAX_PAGE_SIZE: 1000
  # WEBHOOKS_ENABLED: true
extraEnvFrom: []
  # - configMapRef:
  #     name: custom-config
  # - secretRef:
  #     name: custom-secrets
extraSecrets: {}
  # EMAIL_PASSWORD: netbox
# Ends up stored as extra.py in the netbox configuration, must be valid Python
extraConfiguration: ''
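  # A sketch of settings that could end up in extra.py (values are illustrative
  # only; any valid Python from the NetBox configuration docs works here):
  # extraConfiguration: |
  #   BANNER_TOP = "lab environment"
  #   LOGIN_REQUIRED = True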
# Will be stored in plugins.py in the netbox configuration, requires
# using/building a netbox image containing the required plugins, must be valid
# Python
#
# https://github.com/netbox-community/netbox-docker/wiki/Using-Netbox-Plugins
extraPlugins: ''
  # PLUGINS = ["netbox_bgp"]
  #
  # PLUGINS_CONFIG = {
  #   "netbox_bgp": {
  #     ADD YOUR SETTINGS HERE
  #   }
  # }
resources: {}
  # limits:
  #   cpu: 500m
  #   memory: 512Mi
  # requests:
  #   cpu: 500m
  #   memory: 512Mi
nodeSelector: {}
tolerations: []
affinity: {}
extraVolumes: []
extraVolumeMounts: []
readinessProbe:
  httpGet:
    port: http
    path: /
livenessProbe:
  httpGet:
    port: http
    path: /
startupProbe:
  tcpSocket:
    port: http
  failureThreshold: 12
persistence:
  enabled: false
  # existingClaim: netbox-data
  # storageClass: "-"
  accessMode: ReadWriteOnce
  size: 10Gi
worker:
  enabled: true
  replicaCount: 1
  resources: {}
    # limits:
    #   cpu: 100m
    #   memory: 150Mi
    # requests:
    #   cpu: 100m
    #   memory: 150Mi
  nodeSelector: {}
  tolerations: []
  affinity: {}
# Handles the Netbox 3.0+ housekeeping pod
housekeeping:
  enabled: true
  resources: {}
    # limits:
    #   cpu: 100m
    #   memory: 32Mi
    # requests:
    #   cpu: 100m
    #   memory: 32Mi
  nodeSelector: {}
  tolerations: []
  affinity: {}

View file

@@ -0,0 +1,20 @@
config:
  baseUrl: https://ntfy.sh
persistence:
  existingClaim: ntfy-sh-server
image:
  repository: binwiederhier/ntfy
prometheus:
  podmonitor:
    enabled: true
resources:
  limits:
    memory: "2Gi"
    cpu: "1000m"
  requests:
    cpu: 100m
    memory: 50Mi

View file

@@ -0,0 +1,63 @@
homeserver: "https://synapse.example.com"
serverAddress: "https://slidingsync.example.com"
existingSecret: "slidingsync-secret"
initContainers:
  dbInit:
    image:
      repository: ghcr.io/onedr0p/postgres-init
      tag: "16"
    envFrom:
      - secretRef:
          name: slidingsync-secret
image:
  repository: ghcr.io/matrix-org/sliding-sync
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  tag: ""
replicaCount: 1
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
podAnnotations: {}
podSecurityContext:
  fsGroup: 2000
securityContext:
  capabilities:
    drop:
      - ALL
  readOnlyRootFilesystem: true
  runAsNonRoot: true
  runAsUser: 1000
service:
  type: ClusterIP
  port: 80
resources: {}
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi
autoscaling:
  enabled: false
  minReplicas: 1
  maxReplicas: 100
  targetCPUUtilizationPercentage: 80
  # targetMemoryUtilizationPercentage: 80
nodeSelector: {}
tolerations: []
affinity: {}
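# The proxy itself is configured through SYNCV3_* environment variables, so
# slidingsync-secret is assumed to carry at least these keys. A sketch (names
# follow the sliding-sync proxy's documentation, values are placeholders):
# SYNCV3_SECRET: <random string used to encrypt access tokens>
# SYNCV3_DB: user=slidingsync dbname=slidingsync host=postgres-rw.databases.svc.cluster.local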

View file

@@ -0,0 +1,37 @@
existingSecret: stalwart-mail-secret
extraEnvSecret: stalwart-mail-extraenv-secret
existingConfigMap: stalwart-mail-config
tlsSecret: stalwart-mail-tls
replicaCount: 1
initContainers:
  dbInit:
    image:
      repository: ghcr.io/onedr0p/postgres-init
      tag: "16"
    envFrom:
      - secretRef:
          name: stalwart-mail-secret
image:
  repository: stalwartlabs/mail-server
  pullPolicy: IfNotPresent
  tag: ""
strategy:
  type: Recreate
service:
  type: NodePort
  ports:
    http: 8080
    https: 443
    smtp: 25
    submission: 587
    smtps: 465
    imap: 143
    imaps: 993
    sieve: 4190
podAnnotations: {}

View file

@@ -0,0 +1,54 @@
config:
  externalUrl: https://timesketch.example.com/
  existingConfSecret: timesketch-conf
  existingUserSecret: timesketch-user
  createUser: true
initContainers:
  dbInit:
    image:
      repository: ghcr.io/onedr0p/postgres-init
      tag: "16"
    envFrom:
      - secretRef:
          name: timesketch-secret
image:
  repository: us-docker.pkg.dev/osdfir-registry/timesketch/timesketch
  pullPolicy: IfNotPresent
  tag: "20240508.1"
imagePullSecrets: []
worker:
  podSecurityContext: {}
  securityContext: {}
frontend:
  podSecurityContext: {}
  securityContext: {}
postgres:
  secretName: timesketch-secret
upload:
  persistence:
    accessMode: ReadWriteMany
    size: 10Gi
    storageClass: ceph-filesystem
  persistentVolumeClaim: timesketch-upload
resources:
  limits:
    memory: "3Gi"
    cpu: "1000m"
  requests:
    cpu: 100m
    memory: 250Mi
metrics:
  enabled: true
  port: 9001
caCert:
  existingConfigMapName: cluster-certificates
  existingConfigMapKey: opensearch-ca.crt

View file

@@ -0,0 +1,93 @@
config:
  externalUrl: https://turbinia.example.com/
persistence:
  general:
    storageClass: ceph-filesystem
    accessMode: ReadWriteMany
    size: 2Gi
  evidence:
    storageClass: ceph-filesystem
    accessMode: ReadWriteMany
    size: 2Gi
server:
  image:
    repository: us-docker.pkg.dev/osdfir-registry/turbinia/release/turbinia-server
    pullPolicy: IfNotPresent
    tag: "20240930"
  podSecurityContext: {}
  securityContext: {}
  nodeSelector: {}
  tolerations: []
  affinity: {}
  resources:
    limits:
      cpu: 500m
      memory: 1Gi
    requests:
      cpu: 50m
      memory: 128Mi
worker:
  image:
    repository: us-docker.pkg.dev/osdfir-registry/turbinia/release/turbinia-worker
    pullPolicy: IfNotPresent
    tag: "20240930"
  podSecurityContext: {}
  securityContext:
    # Due to Turbinia attaching and detaching disks, a privileged container is
    # required for the worker container.
    privileged: true
  nodeSelector: {}
  tolerations: []
  affinity: {}
  autoscaling:
    minReplicas: 1
    maxReplicas: 2
    targetCPUUtilizationPercentage: 50
  resources:
    limits:
      cpu: 500m
      memory: 1Gi
    requests:
      cpu: 50m
      memory: 128Mi
api:
  image:
    repository: us-docker.pkg.dev/osdfir-registry/turbinia/release/turbinia-api-server
    pullPolicy: IfNotPresent
    tag: "20240930"
  podSecurityContext: {}
  securityContext: {}
  nodeSelector: {}
  tolerations: []
  affinity: {}
  resources:
    limits:
      cpu: 500m
      memory: 1Gi
    requests:
      cpu: 50m
      memory: 128Mi
controller:
  image:
    repository: us-docker.pkg.dev/osdfir-registry/turbinia/release/turbinia-controller
    pullPolicy: IfNotPresent
    tag: "20240930"
  podSecurityContext: {}
  securityContext: {}
  nodeSelector: {}
  tolerations: []
  affinity: {}
  resources:
    limits:
      cpu: 500m
      memory: 1Gi
    requests:
      cpu: 50m
      memory: 128Mi
metrics:
  port: 9001

View file

@@ -0,0 +1,72 @@
existingSecret: yeti-secret
existingConfigSecret: yeti-conf
frontend:
  image:
    repository: yetiplatform/yeti-frontend
    tag: 2.1.13
    pullPolicy: IfNotPresent
  podSecurityContext: {}
  containerSecurityContext: {}
  nodeSelector: {}
  affinity: {}
  tolerations: []
  resources:
    requests:
      memory: "100Mi"
      cpu: "50m"
    limits:
      memory: "2Gi"
      cpu: "1000m"
backend:
  image:
    repository: yetiplatform/yeti
    tag: 2.1.13
    pullPolicy: IfNotPresent
  podSecurityContext: {}
  api:
    containerSecurityContext: {}
    nodeSelector: {}
    affinity: {}
    tolerations: []
    resources:
      requests:
        memory: "100Mi"
        cpu: "50m"
      limits:
        memory: "2Gi"
        cpu: "1000m"
  tasks:
    containerSecurityContext: {}
    nodeSelector: {}
    affinity: {}
    tolerations: []
    resources:
      requests:
        memory: "100Mi"
        cpu: "50m"
      limits:
        memory: "2Gi"
        cpu: "1000m"
redis:
  host: dragonfly.databases.svc.cluster.local
  port: 6379
arangodb:
  database: yeti
  host: arango-dfir-cluster-ea.databases.svc.cluster.local
  port: 8529
timesketch:
  enabled: false
  endpoint: ""
serviceAccount:
  name: "yeti"
  annotations: {}
metrics:
  enabled: true
  port: 9001