Merge pull request 'chore: general chart syntax fixes' (#122) from general-version-cleanup into main

Reviewed-on: #122
Tommy 2024-11-14 19:20:51 +00:00 committed by Mooo[bot]
commit ede63a12c3
48 changed files with 2315 additions and 661 deletions

@@ -1,10 +1,10 @@
apiVersion: v2
name: attic
description: |
A Nix binary caching server
description: A Nix binary caching server
# renovate: image=ghcr.io/zhaofengli/attic
appVersion: 4dbdbee45728d8ce5788db6461aaaa89d98081f0
type: application
version: 0.2.4
version: 0.2.5
maintainers:
- name: Tommy Skaug
email: tommy@skaug.me
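
Throughout these charts, a `# renovate: image=...` comment sits directly above `appVersion` so that a Renovate custom (regex) manager can pair the named image with the version value on the following line and bump it automatically; the manager configuration itself is assumed here, not part of this diff. The convention, sketched on the attic chart:

```yaml
# renovate: image=ghcr.io/zhaofengli/attic
appVersion: 4dbdbee45728d8ce5788db6461aaaa89d98081f0
```

The marker names the Docker image; `appVersion` carries the tag (or commit SHA) that Renovate keeps current.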

@@ -0,0 +1,32 @@
config:
externalUrl: https://cache.example.com/
persistence:
existingClaim: attic
initContainers:
dbInit:
image:
repository: ghcr.io/onedr0p/postgres-init
tag: "16"
envFrom:
- secretRef:
name: attic-secret
envFromSecret: attic-secret
image:
repository: ghcr.io/zhaofengli/attic
tag: 4dbdbee45728d8ce5788db6461aaaa89d98081f0
postgres:
secretName: attic-secret
resources:
limits:
memory: "3Gi"
cpu: "1000m"
# requests:
# cpu: 100m
# memory: 250Mi
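
The `initContainers.dbInit` block above (repeated across most charts in this commit) bootstraps the database with `ghcr.io/onedr0p/postgres-init`, and the same Secret is reused by the app through `envFromSecret` and `postgres.secretName`. A minimal sketch of such a Secret, assuming the `INIT_POSTGRES_*` variable names that image is commonly documented to read (verify against the image's own docs before relying on them):

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: attic-secret
stringData:
  # Read by the postgres-init container (assumed variable names)
  INIT_POSTGRES_HOST: postgres-rw.databases.svc.cluster.local
  INIT_POSTGRES_DBNAME: attic
  INIT_POSTGRES_USER: attic
  INIT_POSTGRES_PASS: change-me
  INIT_POSTGRES_SUPER_PASS: change-me-too
  # ...plus whatever variables attic itself expects via envFromSecret
```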

@@ -0,0 +1,39 @@
config:
baseUrl: https://infrastructure.252.no
image:
repository: index.docker.io/lyft/clutch
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: "latest"
existingSecretName: clutch-config-secret
initContainers:
dbInit:
image:
repository: ghcr.io/onedr0p/postgres-init
tag: "16"
existingSecretName: clutch-dbinit-secret
prometheus:
podmonitor:
enabled: true
resources:
limits:
cpu: 500m
memory: 768Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
podLabels: {}
podAnnotations: {}

@@ -2,8 +2,9 @@ apiVersion: v2
name: conduit
description: Conduit is a simple, fast and reliable chat server powered by Matrix.
type: application
version: 0.4.0
# renovate: image=matrixconduit/matrix-conduit
appVersion: "0.6.0"
version: 0.4.1
maintainers:
- name: Tommy Skaug
email: tommy@skaug.me

@@ -0,0 +1,104 @@
# Default values for conduit.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: registry.gitlab.com/famedly/conduit/matrix-conduit
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
conduit:
server_name: "your.server.name"
allowRegistration: false
allowEncryption: true
allowFederation: true
allowRoomCreation: true
allowUnstableRoomVersions: true
trustedServers:
- "matrix.org"
maxRequestSize: "20000000" # in bytes, ~20 MB
# maxConcurrentRequests: 100
maxConcurrentRequests:
# log: "warn,rocket=off,_=off,sled=off"
log:
registrationToken:
podLabels: {}
podAnnotations: {}
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 6167
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
nodeSelector: {}
tolerations: []
affinity: {}
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
enabled: true
annotations: {}
## Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
## A manually managed Persistent Volume and Claim
## Requires persistence.enabled: true
## If defined, PVC must be created manually before volume will be bound
# existingClaim:
accessMode: ReadWriteOnce
size: 1Gi
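
These are the chart defaults; a real deployment typically overrides only a few keys. A sketch of an override file restricted to keys defined above (hostname, token, and size are placeholders):

```yaml
conduit:
  server_name: "matrix.example.com"
  allowRegistration: true
  registrationToken: "use-a-long-random-string"
persistence:
  size: 5Gi
```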

@@ -31,14 +31,16 @@ conduit:
allowRoomCreation: true
allowUnstableRoomVersions: true
trustedServers:
- "matrix.org"
maxRequestSize: "20000000" # in bytes, ~20 MB
- "matrix.org"
# in bytes, ~20 MB
maxRequestSize: "20000000"
# maxConcurrentRequests: 100
maxConcurrentRequests:
# log: "warn,rocket=off,_=off,sled=off"
log:
registrationToken:
podLabels: {}
podAnnotations: {}

@@ -1,9 +1,9 @@
apiVersion: v2
name: dispatch
description: Netflix Dispatch incident management system
version: 0.5.5
# renovate: image=code.252.no/elk-works/dispatch
appVersion: v20240731
version: 0.5.6
maintainers:
- name: Tommy Skaug
email: tommy@skaug.me

@@ -0,0 +1,42 @@
envFromSecret: dispatch-secret
image:
registry: code.252.no
repository: elk-works/dispatch
tag: v20240731@sha256:e6c916d8bb6e02e7e23473229f3aa7643bcd00de5f59553794573a4a4737a635
pullPolicy: IfNotPresent
pullSecret: ""
initContainers:
dbInit:
image:
repository: ghcr.io/onedr0p/postgres-init
tag: "16"
envFrom:
- secretRef:
name: dispatch-secret
url: http://localhost:80
authentication:
provider_slug: dispatch-auth-provider-header
header:
header_name: Tailscale-User-Login
core:
enabled: true
web:
enabled: true
service:
type: ClusterIP
port: 80
scheduler:
enabled: true
postgres:
hostname: "postgres-rw.databases.svc.cluster.local"
database_name: "dispatch"
port: "5432"
secretName: dispatch-secret

@@ -2,10 +2,10 @@ apiVersion: v2
name: element-call
description: Matrix video conference and call server
icon: https://avatars.githubusercontent.com/u/13446337?s=48&v=4
# renovate: image=ghcr.io/element-hq/element-call
appVersion: v0.5.15
version: 0.2.1
type: application
version: 0.2.0
maintainers:
- name: Tommy Skaug
email: tommy@skaug.me

@@ -0,0 +1,79 @@
# Default values for element-call.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: ghcr.io/element-hq/element-call
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
podAnnotations: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 8080
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
nodeSelector: {}
tolerations: []
affinity: {}
config:
homeserver:
base_url: http://localhost:8008
server_name: localhost
livekit:
livekit_service_url: https://localhost/
posthog:
api_key:
api_host: https://localhost
rageshake:
submit_url:
eula_url:
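
The `config` block above is rendered into Element Call's runtime configuration; a sketch of a populated variant, restricted to the keys shown and using placeholder hosts:

```yaml
config:
  homeserver:
    base_url: https://matrix.example.com
    server_name: example.com
  livekit:
    livekit_service_url: https://livekit-jwt.example.com/
  rageshake:
    submit_url: https://rageshake.example.com/api/submit
```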

@@ -1,10 +1,10 @@
apiVersion: v2
name: grr
description: |
A toolset of DFIR tools
description: Toolset of DFIR tools
# renovate: image=ghcr.io/google/grr
appVersion: "20240508"
type: application
version: 0.2.0
version: 0.2.1
maintainers:
- name: Tommy Skaug
email: tommy@skaug.me

@@ -0,0 +1,77 @@
initContainers:
dbInit:
image:
repository: mariadb
tag: "11.5.2"
mysql:
host: mariadb-dfir.databases.svc.cluster.local
port: 3306
existingSecretName: grr-mysql-init-secret
fleetspeak:
database: fleetspeak
grr:
database: grr
fleetspeak:
image:
repository: ghcr.io/google/fleetspeak
tag: latest
httpsHeaderChecksum: false
subjectCommonName: "fleetspeak-frontend"
admin:
replicas: 1
url: grr-fleetspeak-admin.sec-forensics.svc.cluster.local
resources:
limits:
cpu: 500m
memory: 1Gi
requests:
cpu: 50m
memory: 128Mi
frontend:
replicas: 1
url: grr-fleetspeak-frontend.sec-forensics.svc.cluster.local
resources:
limits:
cpu: 500m
memory: 1Gi
requests:
cpu: 50m
memory: 128Mi
grr:
image:
repository: "ghcr.io/google/grr"
tag: latest
subjectCommonName: grr.252.no
admin:
replicas: 1
resources:
limits:
cpu: 500m
memory: 1Gi
requests:
cpu: 50m
memory: 128Mi
frontend:
resources:
limits:
cpu: 500m
memory: 1Gi
requests:
cpu: 50m
memory: 128Mi
worker:
resources:
limits:
cpu: 500m
memory: 1Gi
requests:
cpu: 50m
memory: 128Mi
prometheus:
metricsPort: 9001

@@ -1 +0,0 @@
*/configs/*

@@ -1,9 +1,13 @@
apiVersion: v2
name: hashr
description: "Build your own hash sets based on your data sources. Extracts files and hashes \nout of input sources (e.g. raw disk image, GCE disk image, ISO file, Windows \nupdate package, .tar.gz file, etc)\n"
description: |
Build your own hash sets based on your data sources. Extracts
files and hashes out of input sources (e.g. raw disk image, GCE
disk image, ISO file, Windows update package, .tar.gz file, etc)
# renovate: image=us-docker.pkg.dev/osdfir-registry/hashr/release/hashr
appVersion: "20240508"
type: application
version: 0.2.0
version: 0.2.1
maintainers:
- name: Tommy Skaug
email: tommy@skaug.me

@@ -33,7 +33,7 @@ date: "{{ now | htmlDate }}"
Selector labels
*/}}
{{- define "hashr.selectorLabels" -}}
app.kubernetes.io/name: {{ include "timesketch.name" . }}
app.kubernetes.io/name: {{ include "hashr.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

@@ -4,9 +4,9 @@ description: |
High-performance self-hosted photo and video management
# renovate: image=ghcr.io/immich-app/immich-server
appVersion: 1.105.1
version: 1.0.3
type: application
icon: https://avatars.githubusercontent.com/u/109746326?s=200&v=4
version: 1.0.1
maintainers:
- name: Tommy Skaug
email: tommy@skaug.me

@@ -18,7 +18,7 @@ library:
enabled: true
template: "{{y}}/{{y}}-{{MM}}-{{dd}}/{{filename}}"
microservices:
microservices:
resources:
limits:
memory: "3Gi"

@@ -3,9 +3,9 @@ apiVersion: v2
type: application
name: linkding
description: A Helm chart for linkding
version: 2.0.3
# renovate: image=sissbruecker/linkding
appVersion: "1.36.0"
version: 2.0.4
sources:
- https://code.252.no/tommy/helm-charts
- https://github.com/sissbruecker/linkding

@@ -0,0 +1,49 @@
image:
repository: sissbruecker/linkding
pullPolicy: IfNotPresent
initContainers:
dbInit:
image:
repository: ghcr.io/onedr0p/postgres-init
tag: "16"
envFrom:
- secretRef:
name: linkding-secret
env:
- name: TZ
value: Europe/Oslo
envFromSecret: linkding-env-secret
postgres:
secretName: linkding-secret
persistence:
existingClaim: linkding
securityContext: {}
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
affinity: {}
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: kubernetes.io/hostname
# operator: In
# values:
# - my-node-xyz
tolerations: []
# - key: node-role.kubernetes.io/control-plane
# operator: Exists
# effect: NoSchedule

@@ -3,9 +3,9 @@ name: matrix-authentication-service
description: OAuth 2.0 and OpenID Provider for Matrix Homeservers
icon: https://avatars.githubusercontent.com/u/8418310?s=48&v=4
type: application
version: 0.1.7
# renovate: image=ghcr.io/element-hq/matrix-authentication-service
appVersion: "0.12.0"
version: 0.1.8
maintainers:
- name: Tommy Skaug
email: tommy@skaug.me

@@ -0,0 +1,63 @@
nameOverride: ""
fullnameOverride: ""
existingSecretConfigMap: matrix-authentication-service-secret
postgres:
secretName: matrix-authentication-service-secret
initContainers:
dbInit:
image:
repository: ghcr.io/onedr0p/postgres-init
tag: "16"
envFrom:
- secretRef:
name: matrix-authentication-service-secret
image:
registry: ghcr.io
repository: element-hq/matrix-authentication-service
pullPolicy: IfNotPresent
# -- Overrides the image tag whose default is the chart appVersion.
tag:
replicaCount: 1
podAnnotations: {}
podLabels: {}
podSecurityContext:
fsGroup: 1000
securityContext:
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
livenessProbe:
httpGet:
port: internal
path: /health
readinessProbe:
httpGet:
port: internal
path: /health
prometheus:
servicemonitor:
enabled: true
labels: {}
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi

@@ -1,11 +1,11 @@
apiVersion: v2
name: matrix-synapse
description: Matrix homeserver
icon: https://matrix.org/images/matrix-logo.svg
appVersion: v1.104.0
type: application
version: 4.2.1
icon: https://matrix.org/images/matrix-logo.svg
# renovate: image=ghcr.io/element-hq/synapse
appVersion: v1.104.0
version: 4.2.2
maintainers:
- name: Tommy Skaug
email: tommy@skaug.me

@@ -0,0 +1,951 @@
initContainers:
dbInit:
image:
repository: ghcr.io/onedr0p/postgres-init
tag: "16"
envFrom:
- secretRef:
name: synapse-secret
prometheus:
podmonitor:
enabled: true
image:
repository: ghcr.io/element-hq/synapse
pullPolicy: IfNotPresent
# pullSecrets:
# - myRegistryKeySecretName
# nameOverride: ""
# fullnameOverride: ""
# serverName: 'example.com'
# publicServerName: 'matrix.example.com'
# The source of the signing key used by Synapse in federation.
signingkey:
# Enable a Kubernetes job to generate and store a signing key if one does not
# exist.
# If you have already run a Matrix server at some point on your domain then
# you will want to keep the old signing key, either by using the `existingSecret`
# configuration, or by including the old key under `extraConfig.old_signing_keys`
#
# If you lose your signing key then any federation traffic from your instance
# might not be trusted any more by the wider network.
#
job:
enabled: true
# Annotations to apply to the signing-key-job.
annotations: {}
# argocd.argoproj.io/hook: PostSync
# argocd.argoproj.io/hook-delete-policy: HookSucceeded
generateImage:
repository: matrixdotorg/synapse
# tag: latest
pullPolicy: IfNotPresent
publishImage:
repository: bitnami/kubectl
# tag: latest
pullPolicy: IfNotPresent
# Specify an existing signing key secret, will need to be created in advance.
# existingSecret: secret-name
# existingSecretKey: signing.key
# Resources to apply to the signing key generation job
#
resources: {}
# limits:
# cpu: 100m
# memory: 250Mi
# requests:
# cpu: 100m
# memory: 250Mi
# Matrix configuration values that affect other parts of the chart, for any
# value not handled by this block, you will want to instead set it in
# extraConfig below.
# Ref: https://github.com/matrix-org/synapse/blob/develop/docs/sample_config.yaml
config:
# The publicly accessible URL for the Synapse instance, will default to
# https://<publicServerName>.
# publicBaseurl: 'https://matrix.example.com'
# The log level for Synapse and all modules.
# logLevel: INFO
# Should usage stats be reported
#
reportStats: false
# Protect registration with recaptcha. (optional)
#
# recaptcha:
# publicKey: ''
# privateKey: ''
# URIs and secret key for TURN servers to use to help establish 1:1 WebRTC
# calls.
#
# turnUris: []
# turnSecret: ''
# Registration configuration, note that registration with the
# container-internal register_new_matrix_user tool is always possible.
#
# enableRegistration: false
# NB; this value will default to a random string if not specified.
# registrationSharedSecret: ''
# NB; Strongly recommended to set this to a secure value.
# macaroonSecretKey: ''
# A set of trusted servers to contact if another server doesn't respond to a
# signing key request.
#
trustedKeyServers:
- server_name: matrix.org
# verify_keys:
# "ed25519:auto": "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
# The bind addresses to use for the default listeners
#
# bindAddresses:
# - '::'
# Extra listeners to configure.
#
extraListeners: []
# - port: 9000
# bind_addresses: ['::']
# type: manhole
# Extra listener types to add onto the main listener.
#
extraMainListenerTypes: []
# - consent
# Logging
# use TerseJsonFormatter structured logging
# Ref: https://matrix-org.github.io/synapse/latest/structured_logging.html
useStructuredLogging: false
# Specify arbitrary Synapse configuration here;
# Ref: https://github.com/matrix-org/synapse/blob/develop/docs/sample_config.yaml
##
extraConfig: {}
# old_signing_keys:
# "ed25519:id": { key: "base64string", expired_ts: 123456789123 }
# use_presence: false
# enable_search: false
# federation_domain_whitelist:
# - lon.example.com
# - nyc.example.com
# - syd.example.com
# dynamic_thumbnails: true
# Specify additional loggers configuration here;
# Ref: https://matrix-org.github.io/synapse/latest/structured_logging.html
extraLoggers: {}
# synapse.storage.SQL:
# level: WARNING
# Specify arbitrary - secret - Synapse configuration here;
# These values will be stored in secrets instead of configmaps
# Ref: https://github.com/matrix-org/synapse/blob/develop/docs/sample_config.yaml
##
extraSecrets: {}
# password_config:
# pepper: ''
# Configuration to apply to the main Synapse pod.
##
synapse:
# Only really applicable when the deployment has an RWO PV attached (e.g. when media repository
# is enabled for the main Synapse pod)
# Since replicas = 1, an update can get "stuck", as the previous pod remains attached to the
# PV, and the "incoming" pod can never start. Changing the strategy to "Recreate" will
# terminate the single previous pod, so that the new, incoming pod can attach to the PV
#
strategy:
type: RollingUpdate
# Annotations to apply to the main Synapse pod.
#
annotations: {}
# prometheus.io/scrape: "true"
# prometheus.io/path: "/_synapse/metrics"
# prometheus.io/port: "9090"
# Labels to apply to the main Synapse pod.
#
labels: {}
# Additional environment variables to apply to the main Synapse pod
#
extraEnv: []
# - name: LD_PRELOAD
# value: /usr/lib/x86_64-linux-gnu/libjemalloc.so.2
# - name: SYNAPSE_CACHE_FACTOR
# value: "2"
# Additional volumes to mount into Synapse
#
extraVolumes: []
# - name: spamcheck
# flexVolume:
# driver: ananace/git-live
# options:
# repo: https://github.com/company/synapse-module
# interval: 1d
# readOnly: true
extraVolumeMounts: []
# - name: spamcheck
# mountPath: /usr/local/lib/python3.7/site-packages/company
# Extra commands to run when starting Synapse
#
extraCommands: []
# - 'apt-get update -yqq && apt-get install patch -yqq'
# - 'patch -d/usr/local/lib/python3.7/site-packages/synapse -p2 < /synapse/patches/something.patch'
# Configuration for the pod security policy, Synapse will always run as
# its own user, even if not set.
# Note that changing this may also require you to use the volumePermission
# helper depending on your storage.
#
# NB; The synapse install is currently unable to run as anything but UID:GID
# 666:666.
#
podSecurityContext: {}
# fsGroup: 666
# runAsGroup: 666
# runAsUser: 666
# Configuration for the container security policy, refer to the above
# podSecurityContext for more relevant information.
#
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 666
# Resources to apply to the main Synapse pod.
#
resources: {}
# limits:
# cpu: 1000m
# memory: 2500Mi
# requests:
# cpu: 1000m
# memory: 2500Mi
# Liveness probe configuration to use
#
livenessProbe:
httpGet:
path: /health
port: http
# Readiness probe configuration to use
#
readinessProbe:
httpGet:
path: /health
port: http
# Startup probe configuration to use
#
startupProbe:
failureThreshold: 12
httpGet:
path: /health
port: http
# Node selectors to set for the main Synapse pod.
#
nodeSelector: {}
# Tolerations to set for the main Synapse pod.
#
tolerations: []
# Affinity to set for the main Synapse pod.
#
affinity: {}
# Configuration for handling Synapse workers, which are useful for handling
# high-load deployments.
##
# More information is available at;
# https://github.com/matrix-org/synapse/blob/master/docs/workers.md
##
workers:
# Default configuration, this is inherited into all workers, and can also be
# overridden on each worker type.
#
default:
# The number of worker replicas, note that some workers require special
# handling. Refer to the information URL above.
#
replicaCount: 1
# Update strategy - only really applicable for deployments with RWO PVs attached (e.g. media repository)
# If replicas = 1, an update can get "stuck", as the previous pod remains attached to the
# PV, and the "incoming" pod can never start. Changing the strategy to "Recreate" will
# terminate the single previous pod, so that the new, incoming pod can attach to the PV
#
strategy:
type: RollingUpdate
# A specific name for this worker, can't be set globally.
# Note that this can only be set when replicaCount is 1
# name:
# Additional configuration to set for the worker, can't be set globally.
# extraConfig: {}
# Annotations to apply to the worker.
#
annotations: {}
# prometheus.io/scrape: "true"
# prometheus.io/path: /_synapse/metrics
# prometheus.io/port: 9090
# Additional environment variables to add to the worker.
#
extraEnv: []
# - name: LD_PRELOAD
# value: /usr/lib/x86_64-linux-gnu/libjemalloc.so.2
# - name: SYNAPSE_CACHE_FACTOR
# value: "1.0"
# Additional volumes to add to the worker.
# Useful for the media repo, or for adding Python modules.
#
volumes: []
volumeMounts: []
# Extra commands to run when starting Synapse
#
extraCommands: []
# - 'apt-get update -yqq && apt-get install patch -yqq'
# - 'patch -d/usr/local/lib/python3.7/site-packages/synapse -p2 < /synapse/patches/something.patch'
# Security context information to set to the worker.
#
podSecurityContext: {}
# fsGroup: 666
# runAsGroup: 666
# runAsUser: 666
# Container security context information to set to the worker.
#
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 666
# Resources to apply to the worker.
#
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
# Liveness probe configuration to use
#
livenessProbe:
httpGet:
path: /health
port: metrics
# Readiness probe configuration to use
#
readinessProbe:
httpGet:
path: /health
port: metrics
# Readiness probe configuration to use
# Defaults to allowing workers 60 seconds to start up
#
startupProbe:
failureThreshold: 6
httpGet:
path: /health
port: metrics
# Node selector configuration to set on the worker.
#
nodeSelector: {}
# Toleration configuration to set on the worker.
#
tolerations: []
# Affinity configuration to set on the worker.
#
affinity: {}
# The generic worker can be used to handle most endpoints.
# Be careful when enabling the sync endpoints as they can eat large amounts of
# resources. Refer to the information URL above for more info.
# Proper load balancing with the K8s Ingress resource may not be possible.
#
generic_worker:
enabled: false
generic: true
listeners: [ client, federation ]
csPaths:
# Sync requests
# - "/_matrix/client/(r0|v3)/sync$"
- "/_matrix/client/(api/v1|r0|v3)/events$"
# - "/_matrix/client/(api/v1|r0|v3)/initialSync$"
# - "/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync$"
# Client API requests
- "/_matrix/client/(api/v1|r0|v3|unstable)/createRoom$"
- "/_matrix/client/(api/v1|r0|v3|unstable)/publicRooms$"
- "/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/joined_members$"
- "/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/context/"
- "/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/members$"
- "/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state$"
- "/_matrix/client/v1/rooms/.*/hierarchy$"
- "/_matrix/client/unstable/org.matrix.msc2716/rooms/.*/batch_send$"
- "/_matrix/client/unstable/im.nheko.summary/rooms/.*/summary$"
- "/_matrix/client/(r0|v3|unstable)/account/3pid$"
- "/_matrix/client/(r0|v3|unstable)/account/whoami$"
- "/_matrix/client/(r0|v3|unstable)/devices$"
- "/_matrix/client/versions$"
- "/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$"
- "/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event/"
- "/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms$"
- "/_matrix/client/(api/v1|r0|v3|unstable)/search$"
# Encryption requests
- "/_matrix/client/(r0|v3|unstable)/keys/query$"
- "/_matrix/client/(r0|v3|unstable)/keys/changes$"
- "/_matrix/client/(r0|v3|unstable)/keys/claim$"
- "/_matrix/client/(r0|v3|unstable)/room_keys/"
# Registration/login requests
- "/_matrix/client/(api/v1|r0|v3|unstable)/login$"
- "/_matrix/client/(r0|v3|unstable)/register$"
- "/_matrix/client/v1/register/m.login.registration_token/validity$"
# Event sending requests
- "/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/redact"
- "/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/send"
- "/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state/"
- "/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/(join|invite|leave|ban|\
unban|kick)$"
- "/_matrix/client/(api/v1|r0|v3|unstable)/join/"
- "/_matrix/client/(api/v1|r0|v3|unstable)/profile/"
# User directory search requests
- "/_matrix/client/(r0|v3|unstable)/user_directory/search"
# Worker event streams
# See https://matrix-org.github.io/synapse/latest/workers.html#stream-writers
#
# The typing event stream
# - "/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/typing"
# The to_device event stream
# - "/_matrix/client/(r0|v3|unstable)/sendToDevice/"
# The account_data event stream
# - "/_matrix/client/(r0|v3|unstable)/.*/tags"
# - "/_matrix/client/(r0|v3|unstable)/.*/account_data"
# The receipts event stream
# - "/_matrix/client/(r0|v3|unstable)/rooms/.*/receipt"
# - "/_matrix/client/(r0|v3|unstable)/rooms/.*/read_markers"
# The presence event stream
# - "/_matrix/client/(api/v1|r0|v3|unstable)/presence/"
paths:
# Federation requests
- "/_matrix/federation/v1/event/"
- "/_matrix/federation/v1/state/"
- "/_matrix/federation/v1/state_ids/"
- "/_matrix/federation/v1/backfill/"
- "/_matrix/federation/v1/get_missing_events/"
- "/_matrix/federation/v1/publicRooms"
- "/_matrix/federation/v1/query/"
- "/_matrix/federation/v1/make_join/"
- "/_matrix/federation/v1/make_leave/"
- "/_matrix/federation/(v1|v2)/send_join/"
- "/_matrix/federation/(v1|v2)/send_leave/"
- "/_matrix/federation/(v1|v2)/invite/"
- "/_matrix/federation/v1/event_auth/"
- "/_matrix/federation/v1/exchange_third_party_invite/"
- "/_matrix/federation/v1/user/devices/"
- "/_matrix/key/v2/query"
- "/_matrix/federation/v1/hierarchy/"
# Inbound federation transaction request
- "/_matrix/federation/v1/send/"
# To separate the generic worker into specific concerns, for example federation transaction receiving:
# NB; This worker should have incoming traffic routed based on source IP, which is
# left as an exercise to the reader.
# https://github.com/matrix-org/synapse/blob/develop/docs/workers.md#load-balancing
# federation_reader:
# enabled: true
# generic: true
# listeners: [federation]
# paths:
# - "/_matrix/federation/v1/send/"
# Or /sync handling.
# NB; Care should be taken to route users to the same instance when scaling this worker,
# this is left as an exercise to the reader.
# https://github.com/matrix-org/synapse/blob/develop/docs/workers.md#load-balancing
# synchrotron:
# enabled: true
# generic: true
# listeners: [client]
# csPaths:
# - "/_matrix/client/(v2_alpha|r0|v3)/sync$"
# - "/_matrix/client/(api/v1|v2_alpha|r0|v3)/events$"
# - "/_matrix/client/(api/v1|r0|v3)/initialSync$"
# - "/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync$"
# Specialized - non-generic workers below;
# This worker deals with pushing notifications.
# NB; Only one instance of this worker can be run at a time, refer to the
# information URL above.
#
pusher:
enabled: false
# This worker handles sending data to registered appservices.
# NB; Only one instance of this worker can be run at a time, refer to the
# information URL above.
#
appservice:
enabled: false
generic: true
name: appservices
# This worker handles sending federation traffic to other Synapse servers.
#
federation_sender:
enabled: false
# Specialized workers can also be run as multiple separate instances,
# make sure to read the relevant documentation.
#
# federation_sender_other:
# app: federation_sender
# enabled: false
# This worker deals with serving and storing media.
# NB; Running multiple instances will conflict with background jobs.
#
media_repository:
enabled: false
listeners: [ media ]
csPaths:
- "/_matrix/media/.*"
- "/_synapse/admin/v1/purge_media_cache$"
- "/_synapse/admin/v1/room/.*/media"
- "/_synapse/admin/v1/user/.*/media"
- "/_synapse/admin/v1/media/"
- "/_synapse/admin/v1/quarantine_media/"
- "/_synapse/admin/v1/users/.*/media$"
paths:
- "/_matrix/media/.*"
# This worker deals with user directory searches.
#
user_dir:
enabled: false
name: userdir
listeners: [ client ]
csPaths:
- "/_matrix/client/(api/v1|r0|v3|unstable)/user_directory/search$"
# This worker handles key uploads, and may also stub out presence if that is
# disabled. If you set extraConfig.use_presence=false then you may want to
# uncomment the second path.
#
frontend_proxy:
enabled: false
listeners: [ client ]
csPaths:
- "/_matrix/client/(api/v1|r0|v3|unstable)/keys/upload"
# - "/_matrix/client/(api/v1|r0|v3|unstable)/presence/[^/]+/status"
# This will set up a Lighttpd server to respond to any
# /.well-known/matrix/server requests, to make federation possible without
# adding SRV-records to DNS.
#
wellknown:
enabled: false
replicaCount: 1
# Lighttpd does not bind on IPv6 by default, although this is required in
# IPv6-only clusters.
useIpv6: false
# The host and port combo to serve on .well-known/matrix/server.
#
server: {}
# m.server: matrix.example.com:443
# Data to serve on .well-known/matrix/client.
#
client: {}
# m.homeserver:
# base_url: https://matrix.example.com
# Configuration for the wellknown service.
#
service:
type: ClusterIP
port: 80
# Extra data objects to serve under .well-known/matrix/<data>
# Dictionaries will be JSON converted, plain strings will be served as they are
#
extraData: {}
# MSC1929 example;
# support:
# admins:
# - matrix_id: '@admin:example.com'
# email_address: 'admin@example.com'
# role: 'admin'
# support_page: 'https://example.com/support'
# A custom htdocs path, useful when running another image.
#
htdocsPath: /var/www/localhost/htdocs
# The lighttpd image to run.
#
image:
repository: ghcr.io/rtsp/docker-lighttpd
tag: latest
pullPolicy: Always
# Optionally specify an array of imagePullSecrets.
# Secrets must be manually created in the namespace.
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
#
# pullSecrets:
# - myRegistryKeySecretName
# Configuration for the pod security policy.
#
podSecurityContext: {}
# fsGroup: 101
# runAsGroup: 101
# runAsUser: 100
# Configuration for the container security policy.
#
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 100
# Resource configuration to apply to the well-known server.
#
resources: {}
# limits:
# cpu: 5m
# memory: 15Mi
# requests:
# cpu: 5m
# memory: 15Mi
# Node selectors to set for the well-known server.
#
nodeSelector: {}
# Tolerations to set for the well-known server.
#
tolerations: []
# Affinity to set for the main well-known server.
#
affinity: {}
# This configuration is for setting up the internally provided Postgres server,
# if you instead want to use an existing server, then you may want to set
# enabled to false and configure the externalPostgresql block.
##
postgresql:
enabled: true
auth:
# XXX Change me!
password: synapse
# Or use existing secret with "password" key
# instead of static password
#
# existingSecret: postgresql-secret
username: synapse
database: synapse
primary:
initdb:
args: "--lc-collate=C --lc-ctype=C"
persistence:
# storageClass: "-"
size: 16Gi
# Extra arguments for the database connection
# ref: https://github.com/matrix-org/synapse/blob/develop/docs/postgres.md#synapse-config
#
extraArgs: {}
# An externally configured Postgres server to use for Synapse's database, note
# that the database needs to have both COLLATE and CTYPE set to "C".
##
externalPostgresql:
# host: postgres
port: 5432
username: synapse
# password: synapse
# The name of an existing secret with postgresql credentials
# existingSecret: postgres-secrets
# Password key to be retrieved from existing secret
# existingSecretPasswordKey: postgres-password
database: synapse
# sslmode: prefer
# Extra arguments for the database connection
# ref: https://github.com/matrix-org/synapse/blob/develop/docs/postgres.md#synapse-config
#
extraArgs: {}
# This configuration is for the internal Redis that's deployed for use with
# workers/sharding, for an external Redis server you want to set enabled to
# false and configure the externalRedis block.
##
redis:
enabled: true
# Database ID for non-default database
# dbid: 0
auth:
enabled: true
# XXX Change me!
password: synapse
# Or use existing secret with "redis-password" key
# instead of static password
#
# existingSecret: redis-secret
architecture: standalone
master:
kind: Deployment
persistence:
# Note that Synapse only uses redis as a synchronization utility, so no
# data will ever need to be persisted.
#
enabled: false
service:
port: 6379
# An externally configured Redis server to use for workers/sharding.
##
externalRedis:
# host: redis
port: 6379
# password: synapse
# Database ID for non-default database
# dbid: 0
# The name of an existing secret with redis credentials
# existingSecret: redis-secrets
# Password key to be retrieved from existing secret
# existingSecretPasswordKey: redis-password
# Persistence configuration for the media repository function.
# This PVC will be mounted in either Synapse or a media_repo worker.
#
# NB; If you want to be able to scale this, you will have to set the
# accessMode to RWX/ReadWriteMany.
#
persistence:
enabled: true
# existingClaim: synapse-data
# storageClass: "-"
accessMode: ReadWriteOnce
size: 10Gi
# Set up an init container to chown the mounted media if necessary.
##
volumePermissions:
enabled: false
uid: 666
gid: 666
image:
repository: alpine
tag: latest
pullPolicy: Always
# Optionally specify an array of imagePullSecrets.
# Secrets must be manually created in the namespace.
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
#
# pullSecrets:
# - myRegistryKeySecretName
resources: {}
# resources:
# requests:
# memory: 128Mi
# cpu: 100m
# Configuration for the main Synapse service.
# To use TLS inside Synapse itself, add a TLS listener, and change the ports
# configured in here.
#
service:
type: ClusterIP
port: 8008
targetPort: http
# The K8s ingress configuration, this will be quite heavily used in order to
# set up all routing necessary for use with a sharded Synapse instance.
# If you're not using an Ingress-compatible K8s ingress, you will need to set up
# your own routing instead.
##
ingress:
enabled: true
# Generate traefik-compatible regex paths instead of nginx-compatible ones.
#
traefikPaths: false
# Annotations to apply to the created ingress resource.
#
annotations: {}
# nginx.ingress.kubernetes.io/use-regex: "true"
# # Sync proxy-body-size with Synapse's max_upload_size which is 10M by default
# nginx.ingress.kubernetes.io/proxy-body-size: 10m
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
# Hosts to add to the ingress configuration for handling Client-to-Server
# API request paths.
#
# NB; config.serverName is included if includeServerName is set. (default)
#
csHosts: []
# - matrix.example.com
# Additional hosts to add to the ingress configuration for handling
# Server-to-Server API requests.
#
# NB; config.serverName is included if includeServerName is set. (default)
#
hosts: []
# - example.com
# Additional hosts to add to the ingress configuration for handling
# well-known requests.
#
# NB; config.serverName is included if includeServerName is set. (default)
#
wkHosts: []
# - example.com
# Additional paths to add to the Server-to-Server ingress blocks, will be
# inserted before the /_matrix catch-all path.
#
paths: []
# # K8s 1.19+
# - path: /_matrix/media
# pathType: Prefix
# backend:
# service:
# name: matrix-media-repo
# port: 8000
# # K8s <1.19
# - path: /_matrix/media
# backend:
# serviceName: matrix-media-repo
# servicePort: 8000
# Additional paths to add to the Client-to-Server ingress blocks, will be
# inserted before the /_matrix and /_synapse catch-all paths.
#
csPaths: []
# # K8s 1.19+
# - path: /_matrix/media
# pathType: Prefix
# backend:
# service:
# name: matrix-media-repo
# port:
# number: 8000
# # K8s <1.19
# - path: /_matrix/media
# backend:
# serviceName: matrix-media-repo
# servicePort: 8000
# Should the /_synapse path be included in the ingress, admin APIs are
# provided under this path.
#
includeUnderscoreSynapse: true
# Should config.serverName be included in the list of ingress paths, can be
# set to false if the main domain is managed in some external way.
#
includeServerName: true
# TLS configuration to include in the ingress configuration
#
tls: []
# - secretName: chart-example-tls
# hosts:
# - example.com
# - matrix.example.com
# Set the name of the IngressClass cluster resource (optional)
# https://kubernetes.io/docs/reference/kubernetes-api/service-resources/ingress-v1/#IngressSpec
# className: can-be-anything
# Specifies whether a service account should be created, and annotations to add.
#
serviceAccount:
create: false
annotations: {}
# eks.amazonaws.com/role-arn: arn:aws:iam::000000000000:role/matrix-synapse
# name: non-default-service-name
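
Given the `workers` defaults above, sharding Synapse is mostly a matter of switching on the worker types you need; the chart's ingress then generates path routing from each worker's `csPaths`/`paths`. A sketch under those assumptions (not a tuned production layout), using only keys shown above:

```yaml
workers:
  generic_worker:
    enabled: true
    replicaCount: 2
  federation_sender:
    enabled: true
redis:
  enabled: true  # workers coordinate over Redis, so keep this on
```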

File diff suppressed because it is too large

@@ -5,7 +5,7 @@ description: |
# renovate: image=dock.mau.dev/maubot/maubot
appVersion: v0.5.0
type: application
version: 0.1.6
version: 0.1.7
maintainers:
- name: Tommy Skaug
email: tommy@skaug.me

@@ -0,0 +1,30 @@
existingSecretConfigMap: maubot-secret
persistence:
existingClaim: maubot
initContainers:
dbInit:
image:
repository: ghcr.io/onedr0p/postgres-init
tag: "16"
envFrom:
- secretRef:
name: maubot-secret
envFromSecret: maubot-secret
image:
repository: dock.mau.dev/maubot/maubot
tag: v0.5.0
postgres:
secretName: maubot-secret
resources:
limits:
memory: "2Gi"
cpu: "1000m"
requests:
cpu: 100m
memory: 50Mi

@@ -1,11 +1,12 @@
apiVersion: v2
name: netbox
description: |
An IP address management (IPAM) and data center infrastructure management (DCIM) tool.
An IP address management (IPAM) and data center infrastructure management
(DCIM) tool.
type: application
# renovate: image=quay.io/netboxcommunity/netbox
appVersion: v4.1.2
type: application
version: 6.1.4
version: 6.1.5
maintainers:
- name: Tommy Skaug
email: tommy@skaug.me

@@ -0,0 +1,136 @@
replicaCount: 1
initContainers:
dbInit:
image:
repository: ghcr.io/onedr0p/postgres-init
tag: "16"
envFrom:
- secretRef:
name: netbox-secret
image:
repository: quay.io/netboxcommunity/netbox
tag: v4.1.6
pullPolicy: Always
nameOverride: ""
fullnameOverride: ""
service:
type: ClusterIP
port: 80
existingSecret: netbox-secret
extraEnv: {}
# EMAIL_SERVER: localhost
# EMAIL_PORT: 25
# EMAIL_USERNAME: netbox
# EMAIL_TIMEOUT: 5
# EMAIL_FROM: netbox@example.com
# MAX_PAGE_SIZE: 1000
# WEBHOOKS_ENABLED: true
extraEnvFrom: []
# - configMapRef:
# name: custom-config
# - secretRef:
# name: custom-secrets
extraSecrets: {}
# EMAIL_PASSWORD: netbox
# Ends up stored as extra.py in the netbox configuration, must be valid Python
extraConfiguration: ''
# Will be stored in plugins.py in the netbox configuration, requires
# using/building a netbox image containing the required plugins, must be valid
# Python
#
# https://github.com/netbox-community/netbox-docker/wiki/Using-Netbox-Plugins
extraPlugins: ''
# PLUGINS = ["netbox_bgp"]
#
# PLUGINS_CONFIG = {
# "netbox_bgp": {
# ADD YOUR SETTINGS HERE
# }
# }
resources: {}
# limits:
# cpu: 500m
# memory: 512Mi
# requests:
# cpu: 500m
# memory: 512Mi
nodeSelector: {}
tolerations: []
affinity: {}
extraVolumes: []
extraVolumeMounts: []
readinessProbe:
httpGet:
port: http
path: /
livenessProbe:
httpGet:
port: http
path: /
startupProbe:
tcpSocket:
port: http
failureThreshold: 12
persistence:
enabled: false
# existingClaim: netbox-data
# storageClass: "-"
accessMode: ReadWriteOnce
size: 10Gi
worker:
enabled: true
replicaCount: 1
resources: {}
# limits:
# cpu: 100m
# memory: 150Mi
# requests:
# cpu: 100m
# memory: 150Mi
nodeSelector: {}
tolerations: []
affinity: {}
# Handles the Netbox 3.0+ housekeeping pod
housekeeping:
enabled: true
resources: {}
# limits:
# cpu: 100m
# memory: 32Mi
# requests:
# cpu: 100m
# memory: 32Mi
nodeSelector: {}
tolerations: []
affinity: {}
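
Because `extraConfiguration` and `extraPlugins` above end up as `extra.py` and `plugins.py` inside the NetBox container, they must be valid Python, which is easiest to express as YAML block scalars. A sketch; the plugin name and settings are illustrative only:

```yaml
extraPlugins: |
  PLUGINS = ["netbox_bgp"]

  PLUGINS_CONFIG = {
      "netbox_bgp": {
          # plugin-specific settings go here
      },
  }
extraConfiguration: |
  # becomes extra.py; any valid Python is accepted
  CHANGELOG_RETENTION = 180
```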

@@ -134,4 +134,3 @@ housekeeping:
tolerations: []
affinity: {}

@@ -1,11 +1,12 @@
apiVersion: v2
name: ntfy-sh
description: |
ntfy lets you send push notifications to your phone or desktop via scripts from any computer
ntfy lets you send push notifications to your phone or desktop via scripts
from any computer
type: application
# renovate: image=binwiederhier/ntfy
appVersion: 2.11.0
type: application
version: 0.2.6
version: 0.2.7
maintainers:
- name: Tommy Skaug
email: tommy@skaug.me

@@ -0,0 +1,20 @@
config:
baseUrl: https://ntfy.sh
persistence:
existingClaim: ntfy-sh-server
image:
repository: binwiederhier/ntfy
prometheus:
podmonitor:
enabled: true
resources:
limits:
memory: "2Gi"
cpu: "1000m"
requests:
cpu: 100m
memory: 50Mi

@@ -2,15 +2,13 @@ apiVersion: v2
name: sliding-sync-proxy
description: A proxy for modern Matrix messaging clients
icon: https://avatars.githubusercontent.com/u/8418310?s=200&v=4
type: application
# renovate: image=ghcr.io/matrix-org/sliding-sync
appVersion: "0.99.19"
type: application
version: 1.1.1
version: 1.1.2
maintainers:
- name: Tommy Skaug
email: tommy@skaug.me
keywords:
- messaging
- encrypted

@@ -0,0 +1,63 @@
homeserver: "https://synapse.example.com"
serverAddress: "https://slidingsync.example.com"
existingSecret: "slidingsync-secret"
initContainers:
dbInit:
image:
repository: ghcr.io/onedr0p/postgres-init
tag: "16"
envFrom:
- secretRef:
name: slidingsync-secret
image:
repository: ghcr.io/matrix-org/sliding-sync
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
replicaCount: 1
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
podAnnotations: {}
podSecurityContext:
fsGroup: 2000
securityContext:
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
service:
type: ClusterIP
port: 80
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
nodeSelector: {}
tolerations: []
affinity: {}

@@ -1,9 +1,9 @@
apiVersion: v2
name: stalwart-mail
description: Stalwart is a JMAP, IMAP4 and SMTP server
version: 0.2.7
# renovate: image=stalwartlabs/mail-server
appVersion: 0.10.6
version: 0.2.8
maintainers:
- name: Tommy Skaug
email: tommy@skaug.me

@@ -0,0 +1,37 @@
existingSecret: stalwart-mail-secret
extraEnvSecret: stalwart-mail-extraenv-secret
existingConfigMap: stalwart-mail-config
tlsSecret: stalwart-mail-tls
replicaCount: 1
initContainers:
dbInit:
image:
repository: ghcr.io/onedr0p/postgres-init
tag: "16"
envFrom:
- secretRef:
name: stalwart-mail-secret
image:
repository: stalwartlabs/mail-server
pullPolicy: IfNotPresent
tag: ""
strategy:
type: Recreate
service:
type: NodePort
ports:
http: 8080
https: 443
smtp: 25
submission: 587
smtps: 465
imap: 143
imaps: 993
sieve: 4190
podAnnotations: {}

@@ -1,11 +1,11 @@
apiVersion: v2
name: timesketch
type: application
description: |
A toolset of DFIR tools
# renovate: image=us-docker.pkg.dev/osdfir-registry/timesketch/timesketch
appVersion: "20241009"
type: application
version: 0.2.3
version: 0.2.4
maintainers:
- name: Tommy Skaug
email: tommy@skaug.me

@@ -0,0 +1,54 @@
config:
externalUrl: https://timesketch.example.com/
existingConfSecret: timesketch-conf
existingUserSecret: timesketch-user
createUser: true
initContainers:
dbInit:
image:
repository: ghcr.io/onedr0p/postgres-init
tag: "16"
envFrom:
- secretRef:
name: timesketch-secret
image:
repository: us-docker.pkg.dev/osdfir-registry/timesketch/timesketch
pullPolicy: IfNotPresent
tag: "20240508.1"
imagePullSecrets: []
worker:
podSecurityContext: {}
securityContext: {}
frontend:
podSecurityContext: {}
securityContext: {}
postgres:
secretName: timesketch-secret
upload:
persistence:
accessMode: ReadWriteMany
size: 10Gi
storageClass: ceph-filesystem
persistentVolumeClaim: timesketch-upload
resources:
limits:
memory: "3Gi"
cpu: "1000m"
requests:
cpu: 100m
memory: 250Mi
metrics:
enabled: true
port: 9001
caCert:
existingConfigMapName: cluster-certificates
existingConfigMapKey: opensearch-ca.crt

@@ -5,7 +5,7 @@ description: |
# renovate: image=us-docker.pkg.dev/osdfir-registry/turbinia/release/turbinia-server
appVersion: "20240930"
type: application
version: 0.1.6
version: 0.1.7
maintainers:
- name: Tommy Skaug
email: tommy@skaug.me

@@ -0,0 +1,93 @@
config:
externalUrl: https://turbinia.example.com/
persistence:
general:
storageClass: ceph-filesystem
accessMode: ReadWriteMany
size: 2Gi
evidence:
storageClass: ceph-filesystem
accessMode: ReadWriteMany
size: 2Gi
server:
image:
repository: us-docker.pkg.dev/osdfir-registry/turbinia/release/turbinia-server
pullPolicy: IfNotPresent
tag: "20240930"
podSecurityContext: {}
securityContext: {}
nodeSelector: {}
tolerations: []
affinity: {}
resources:
limits:
cpu: 500m
memory: 1Gi
requests:
cpu: 50m
memory: 128Mi
worker:
image:
repository: us-docker.pkg.dev/osdfir-registry/turbinia/release/turbinia-worker
pullPolicy: IfNotPresent
tag: "20240930"
podSecurityContext: {}
securityContext:
# Due to Turbinia attaching and detaching disks, a privileged container is required for the worker.
privileged: true
nodeSelector: {}
tolerations: []
affinity: {}
autoscaling:
minReplicas: 1
maxReplicas: 2
targetCPUUtilizationPercentage: 50
resources:
limits:
cpu: 500m
memory: 1Gi
requests:
cpu: 50m
memory: 128Mi
api:
image:
repository: us-docker.pkg.dev/osdfir-registry/turbinia/release/turbinia-api-server
pullPolicy: IfNotPresent
tag: "20240930"
podSecurityContext: {}
securityContext: {}
nodeSelector: {}
tolerations: []
affinity: {}
resources:
limits:
cpu: 500m
memory: 1Gi
requests:
cpu: 50m
memory: 128Mi
controller:
image:
repository: us-docker.pkg.dev/osdfir-registry/turbinia/release/turbinia-controller
pullPolicy: IfNotPresent
tag: "20240930"
podSecurityContext: {}
securityContext: {}
nodeSelector: {}
tolerations: []
affinity: {}
resources:
limits:
cpu: 500m
memory: 1Gi
requests:
cpu: 50m
memory: 128Mi
metrics:
port: 9001

@@ -2,14 +2,14 @@ apiVersion: v2
name: well-known
description: Well-known server supporting JSON, plain text and 2 levels of directories
type: application
version: 2.0.8
# renovate: image=code.252.no/tommy/well-known
appVersion: v2.0.2
version: 2.0.9
maintainers:
- email: tommy@252.no
name: Tommy Skaug
home: https://code.252.no/tommy/charts/src/branch/main/charts/well-known
sources:
- https://code.252.no/tommy/well-known
- https://code.252.no/pub/well-known
keywords:
- well-known
- server

@@ -1,98 +0,0 @@
# well-known
## TL;DR;
```console
helm repo add k8status https://stenic.github.io/well-known/
helm install well-known --namespace well-known well-known/well-known
```
## Introduction
This chart installs `well-known` on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Prerequisites
- Kubernetes 1.18+
- Helm 3.0+
## Installing the Chart
To install the chart with the release name `my-release`:
```console
helm repo add well-known https://stenic.github.io/well-known/
helm install well-known --namespace well-known well-known/well-known
```
These commands deploy well-known on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
> **Tip**: List all releases using `helm list`
## Uninstalling the Chart
To uninstall/delete the `my-release` deployment:
```console
helm delete my-release
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Configuration
The following tables list the configurable parameters of the well-known chart and their default values.
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| affinity | object | `{}` | |
| autoscaling.enabled | bool | `false` | |
| autoscaling.maxReplicas | int | `100` | |
| autoscaling.minReplicas | int | `1` | |
| autoscaling.targetCPUUtilizationPercentage | int | `80` | |
| fullnameOverride | string | `""` | |
| image.pullPolicy | string | `"Always"` | |
| image.repository | string | `"ghcr.io/stenic/well-known"` | |
| image.tag | string | `""` | |
| imagePullSecrets | list | `[]` | |
| ingress.annotations | object | `{}` | |
| ingress.className | string | `""` | |
| ingress.enabled | bool | `false` | |
| ingress.hosts[0].host | string | `"chart-example.local"` | |
| ingress.hosts[0].paths[0].path | string | `"/.well-known/"` | |
| ingress.hosts[0].paths[0].pathType | string | `"ImplementationSpecific"` | |
| ingress.tls | list | `[]` | |
| nameOverride | string | `""` | |
| networkpolicies.enabled | bool | `false` | |
| networkpolicies.kubeApiServerCIDR | string | `"<IP>/32"` | |
| nodeSelector | object | `{}` | |
| podAnnotations | object | `{}` | |
| podSecurityContext | object | `{}` | |
| replicaCount | int | `2` | |
| resources.limits.cpu | string | `"50m"` | |
| resources.limits.memory | string | `"64Mi"` | |
| resources.requests.cpu | string | `"20m"` | |
| resources.requests.memory | string | `"32Mi"` | |
| securityContext.allowPrivilegeEscalation | bool | `false` | |
| securityContext.capabilities.drop[0] | string | `"ALL"` | |
| securityContext.readOnlyRootFilesystem | bool | `true` | |
| securityContext.runAsNonRoot | bool | `true` | |
| serviceAccount.annotations | object | `{}` | |
| serviceAccount.create | bool | `true` | |
| serviceAccount.name | string | `""` | |
| tolerations | list | `[]` | |
| webserver.image.pullPolicy | string | `"Always"` | |
| webserver.image.repository | string | `"nginxinc/nginx-unprivileged"` | |
| webserver.image.tag | string | `"1.23"` | |
| webserver.resources.limits.cpu | string | `"50m"` | |
| webserver.resources.limits.memory | string | `"24Mi"` | |
| webserver.resources.requests.cpu | string | `"10m"` | |
| webserver.resources.requests.memory | string | `"10Mi"` | |
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`.
Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
```console
helm install my-release -f values.yaml well-known/well-known
```

@@ -1,57 +0,0 @@
{{ template "chart.header" . }}
## TL;DR;
```console
helm repo add k8status https://stenic.github.io/well-known/
helm install well-known --namespace well-known well-known/{{ template "chart.name" . }}
```
## Introduction
This chart installs `{{ template "chart.name" . }}` on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Prerequisites
- Kubernetes 1.18+
- Helm 3.0+
## Installing the Chart
To install the chart with the release name `my-release`:
```console
helm repo add well-known https://stenic.github.io/well-known/
helm install well-known --namespace well-known well-known/{{ template "chart.name" . }}
```
These commands deploy {{ template "chart.name" . }} on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
> **Tip**: List all releases using `helm list`
## Uninstalling the Chart
To uninstall/delete the `my-release` deployment:
```console
helm delete my-release
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Configuration
The following tables list the configurable parameters of the {{ template "chart.name" . }} chart and their default values.
{{ template "chart.valuesTable" . }}
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`.
Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
```console
helm install my-release -f values.yaml well-known/{{ template "chart.name" . }}
```

@@ -1,62 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "well-known.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
{{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
{{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
{{- end }}
{{- end }}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ $fullName }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "well-known.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
ingressClassName: {{ .Values.ingress.className }}
{{- end }}
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ .path }}
{{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
pathType: {{ .pathType }}
{{- end }}
backend:
{{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
service:
name: {{ $fullName }}
port:
number: {{ $svcPort }}
{{- else }}
serviceName: {{ $fullName }}
servicePort: {{ $svcPort }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}

@@ -1,49 +0,0 @@
{{- if .Values.networkpolicies.enabled -}}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: {{ include "well-known.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "well-known.labels" . | nindent 4 }}
spec:
podSelector:
matchLabels:
{{- include "well-known.selectorLabels" . | nindent 6 }}
policyTypes:
- Ingress
- Egress
ingress:
# Accept all traffic on http port
- ports:
- port: http
protocol: TCP
egress:
# Allow all traffic to the kubernetes API
{{- range .Values.networkpolicies.kubeApi }}
- to:
{{- range .addresses }}
- ipBlock:
cidr: {{ . }}/32
{{- end }}
ports:
{{- range .ports | default (list 443) }}
- port: {{ . }}
protocol: TCP
{{- end }}
{{- end }}
# Allow traffic to kube-dns
- to:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: kube-system
podSelector:
matchLabels:
k8s-app: kube-dns
ports:
- port: 53
protocol: UDP
- port: 53
protocol: TCP
{{- end -}}

@@ -1,21 +1,21 @@
apiVersion: v2
name: yeti
version: 1.0.3
appVersion: 2.1.11
description: A Helm chart for Yeti Kubernetes deployments.
# renovate: image=yetiplatform/yeti
appVersion: 2.1.11
version: 1.0.4
keywords:
- yeti
- dfir
- security
home: "https://yeti-platform.io/"
maintainers:
- name: Tommy Skaug
email: tommy@skaug.me
url: https://github.com/tommy-skaug/charts
sources:
- https://yeti-platform.io
- https://github.com/yeti-platform/yeti
- https://github.com/google/osdfir-infrastructure
appVersion: "latest"
annotations:
category: Security
licenses: Apache-2.0

@@ -0,0 +1,72 @@
existingSecret: yeti-secret
existingConfigSecret: yeti-conf
frontend:
image:
repository: yetiplatform/yeti-frontend
tag: 2.1.13
pullPolicy: IfNotPresent
podSecurityContext: {}
containerSecurityContext: {}
nodeSelector: {}
affinity: {}
tolerations: []
resources:
requests:
memory: "100Mi"
cpu: "50m"
limits:
memory: "2Gi"
cpu: "1000m"
backend:
image:
repository: yetiplatform/yeti
tag: 2.1.13
pullPolicy: IfNotPresent
podSecurityContext: {}
api:
containerSecurityContext: {}
nodeSelector: {}
affinity: {}
tolerations: []
resources:
requests:
memory: "100Mi"
cpu: "50m"
limits:
memory: "2Gi"
cpu: "1000m"
tasks:
containerSecurityContext: {}
nodeSelector: {}
affinity: {}
tolerations: []
resources:
requests:
memory: "100Mi"
cpu: "50m"
limits:
memory: "2Gi"
cpu: "1000m"
redis:
host: dragonfly.databases.svc.cluster.local
port: 6379
arangodb:
database: yeti
host: arango-dfir-cluster-ea.databases.svc.cluster.local
port: 8529
timesketch:
enabled: false
endpoint: ""
serviceAccount:
name: "yeti"
annotations: {}
metrics:
enabled: true
port: 9001