feat: add conformity with tests according to Helm spec and changelog to releases. Bump versions of all charts.

This commit is contained in:
Tommy 2024-04-09 14:28:39 +02:00
parent 168b5ffee6
commit 3f448017c5
No known key found for this signature in database
25 changed files with 1256 additions and 48 deletions

View file

@ -14,13 +14,39 @@ env:
HELM_VERSION: 3.14.3
jobs:
conformity:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Login to GitHub Container Registry
uses: docker/login-action@v1
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Generate and validate releases
uses: docker://ghcr.io/shivjm/helm-kubeconform-action:v0.2.0
env:
ADDITIONAL_SCHEMA_PATHS: |
schemas/{{ .ResourceKind }}.json
CHARTS_DIRECTORY: "charts"
KUBECONFORM_STRICT: "true"
HELM_UPDATE_DEPENDENCIES: "true"
find-charts-to-release:
needs: conformity
if: needs.conformity.result == 'success'
runs-on: ubuntu-latest
outputs:
modified-charts-files: ${{ steps.list-changed-charts.outputs.all_modified_files }}
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0
@ -37,7 +63,7 @@ jobs:
container: quay.io/git-chglog/git-chglog:0.15.4
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0
@ -52,7 +78,6 @@ jobs:
apk add git grep yq
alias git-tag='git tag'
# TODO: Bundle all of that logic in a GitHub Action to make it easy to share.
for chart_file in ${{ needs.find-charts-to-release.outputs.modified-charts-files }}; do
chart_name=$(grep -Po "(?<=^name: ).+" ${chart_file})
chart_version=$(grep -Po "(?<=^version: ).+" ${chart_file})
@ -88,11 +113,12 @@ jobs:
release-charts:
needs: generate-charts-changelog
runs-on: ubuntu-latest
permissions:
contents: write
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0
@ -107,6 +133,7 @@ jobs:
git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
- name: Install Helm
uses: azure/setup-helm@v3
with:
@ -128,7 +155,7 @@ jobs:
- release-charts
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0

View file

@ -4,7 +4,7 @@ description: |
A Nix binary caching server
appVersion: 4dbdbee45728d8ce5788db6461aaaa89d98081f0
type: application
version: 0.1.4
version: 0.2.0
maintainers:
- name: Tommy Skaug
email: tommy@skaug.me

View file

@ -0,0 +1,31 @@
config:
externalUrl: https://cache.example.com/
persistence:
existingClaim: attic
initContainers:
dbInit:
image:
repository: ghcr.io/onedr0p/postgres-init
tag: "16"
envFrom:
- secretRef:
name: attic-secret
envFromSecret: attic-secret
image:
repository: ghcr.io/zhaofengli/attic
tag: 4dbdbee45728d8ce5788db6461aaaa89d98081f0
postgres:
secretName: attic-secret
resources:
limits:
memory: "3Gi"
cpu: "1000m"
# requests:
# cpu: 100m
# memory: 250Mi

View file

@ -2,7 +2,7 @@ apiVersion: v2
name: conduit
description: Conduit is a simple, fast and reliable chat server powered by Matrix.
type: application
version: 0.3.0
version: 0.4.0
appVersion: "0.6.0"
maintainers:
- name: Tommy Skaug

View file

@ -1,3 +0,0 @@
apiVersion: v1
entries: {}
generated: "2023-12-16T11:30:08.388054+01:00"

View file

@ -0,0 +1,103 @@
# Default values for conduit.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: registry.gitlab.com/famedly/conduit/matrix-conduit
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
conduit:
server_name: "your.server.name"
allowRegistration: false
allowEncryption: true
allowFederation: true
allowRoomCreation: true
allowUnstableRoomVersions: true
trustedServers:
- "matrix.org"
maxRequestSize: "20000000" # in bytes, ~20 MB
# maxConcurrentRequests: 100
maxConcurrentRequests:
# log: "warn,rocket=off,_=off,sled=off"
log:
registrationToken:
podLabels: {}
podAnnotations: {}
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 6167
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
nodeSelector: {}
tolerations: []
affinity: {}
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
enabled: true
annotations: {}
## Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
## A manually managed Persistent Volume and Claim
## Requires persistence.enabled: true
## If defined, PVC must be created manually before volume will be bound
# existingClaim:
accessMode: ReadWriteOnce
size: 1Gi

View file

@ -1,7 +1,7 @@
apiVersion: v2
name: dispatch
description: Netflix Dispatch incident management system
version: 0.3.3
version: 0.4.0
maintainers:
- name: Tommy Skaug
email: tommy@skaug.me

View file

@ -0,0 +1,41 @@
envFromSecret: dispatch-secret
image:
repository: ghcr.io/tommy-skaug/dispatch
tag: v20230919
pullPolicy: Always
initContainers:
dbInit:
image:
repository: ghcr.io/onedr0p/postgres-init
tag: "16"
envFrom:
- secretRef:
name: dispatch-secret
url: http://localhost:80
authentication:
provider_slug: dispatch-auth-provider-header
header:
header_name: Tailscale-User-Login
core:
enabled: true
web:
enabled: true
service:
type: ClusterIP
port: 80
scheduler:
enabled: true
postgres:
hostname: "postgres-rw.databases.svc.cluster.local"
database_name: "dispatch"
port: "5432"
secretName: dispatch-secret

View file

@ -5,7 +5,7 @@ icon: https://avatars.githubusercontent.com/u/13446337?s=48&v=4
appVersion: v0.5.15
type: application
version: 0.1.8
version: 0.2.0
maintainers:
- name: Tommy Skaug
email: tommy@skaug.me

View file

@ -1,26 +0,0 @@
suite: test service
templates:
- templates/service.yaml
tests:
- it: should pass
release:
name: element-call
set:
service:
type: ClusterIP
asserts:
- contains:
path: spec.ports
content:
name: http
port: 8080
protocol: TCP
targetPort: http
- equal:
path: spec.type
value: ClusterIP
- equal:
path: spec.selector
value:
app.kubernetes.io/name: element-call
app.kubernetes.io/instance: element-call

View file

@ -0,0 +1,79 @@
# Default values for element-call.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: ghcr.io/element-hq/element-call
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
podAnnotations: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 8080
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
nodeSelector: {}
tolerations: []
affinity: {}
config:
homeserver:
base_url: http://localhost:8008
server_name: localhost
livekit:
livekit_service_url: https://localhost/
posthog:
api_key:
api_host: https://localhost
rageshake:
submit_url:
eula_url:

View file

@ -1,3 +0,0 @@
apiVersion: v1
entries: {}
generated: "2023-12-16T11:31:03.238235+01:00"

View file

@ -5,7 +5,7 @@ icon: https://matrix.org/images/matrix-logo.svg
appVersion: 1.99.0
type: application
version: 4.0.8
version: 4.1.0
maintainers:
- name: Tommy Skaug
email: tommy@skaug.me

View file

@ -6,7 +6,6 @@
{{- fail "You must specify a static redis password or existing secret if using the included redis chart" -}}
{{- end -}}
---
# Server: {{ required "A valid serverName is required" .Values.serverName }}
apiVersion: apps/v1
kind: Deployment
metadata:

View file

@ -0,0 +1,699 @@
serverName: example.com
existingSecrets: synapse-secret
image:
repository: ghcr.io/element-hq/synapse
tag: &synapseVersion "v1.104.0"
pullPolicy: IfNotPresent
signingkey:
job:
enabled: false
existingSecret: synapse-secret
existingSecretKey: signing.key
resources:
limits:
cpu: 300m
memory: 768Mi
requests:
cpu: 10m
memory: 100Mi
## Matrix configuration values that affect other parts of the chart, for any
## value not handled by this block, you will want to instead set it in
## extraConfig below.
## Ref: https://github.com/matrix-org/synapse/blob/develop/docs/sample_config.yaml
##
config:
publicBaseurl: https://synapse.example.com/
# logLevel: INFO
reportStats: false
## URIs and secret key for TURN servers to use to help establish 1:1 WebRTC
## calls.
##
# turnUris: []
# turnSecret: ''
## Registration configuration, note that registration with the
## container-internal register_new_matrix_user tool is always possible.
##
enableRegistration: true
## NB; this value will default to a random string if not specified.
# registrationSharedSecret: ''
registrationSharedSecret: synapse-secret
registrationSharedSecretKey: REGISTRATION_SECRET
existingmacaroonSecret: synapse-secret
existingmacaroonSecretKey: MACAROON_SECRET_KEY
## A set of trusted servers to contact if another server doesn't respond to a
## signing key request.
##
trustedKeyServers:
- server_name: matrix.org
# verify_keys:
# "ed25519:auto": "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
## The bind addresses to use for the default listeners
##
# bindAddresses:
# - '::'
## Extra listeners to configure.
##
extraListeners: []
# - port: 9000
# bind_addresses: ['::']
# type: manhole
## Extra listener types to add onto the main listener.
##
extraMainListenerTypes: []
# - consent
useStructuredLogging: true
## Specify arbitrary Synapse configuration here;
## Ref: https://github.com/matrix-org/synapse/blob/develop/docs/sample_config.yaml
##
extraConfig:
user_presence: true
enable_search: true
federation_domain_whitelist:
- matrix.org
dynamic_thumbnails: true
## Specify additional loggers configuration here;
## Ref: https://matrix-org.github.io/synapse/latest/structured_logging.html
extraLoggers: {}
# synapse.storage.SQL:
# level: WARNING
## Specify arbitrary - secret - Synapse configuration here;
## These values will be stored in secrets instead of configmaps
## Ref: https://github.com/matrix-org/synapse/blob/develop/docs/sample_config.yaml
##
extraSecrets: {}
# password_config:
# pepper: ''
## Configuration to apply to the main Synapse pod.
##
synapse:
## Only really applicable when the deployment has an RWO PV attached (e.g. when media repository
## is enabled for the main Synapse pod)
## Since replicas = 1, an update can get "stuck", as the previous pod remains attached to the
## PV, and the "incoming" pod can never start. Changing the strategy to "Recreate" will
## terminate the single previous pod, so that the new, incoming pod can attach to the PV
##
strategy:
type: RollingUpdate
## Annotations to apply to the main Synapse pod.
##
annotations:
prometheus.io/scrape: "true"
prometheus.io/path: "/_synapse/metrics"
prometheus.io/port: "9090"
## Labels to apply to the main Synapse pod.
##
labels: {}
## Additional environment variables to apply to the main Synapse pod
##
extraEnv: []
# - name: LD_PRELOAD
# value: /usr/lib/x86_64-linux-gnu/libjemalloc.so.2
# - name: SYNAPSE_CACHE_FACTOR
# value: "2"
## Additional volumes to mount into Synapse
##
extraVolumes: []
# - name: spamcheck
# flexVolume:
# driver: ananace/git-live
# options:
# repo: https://github.com/company/synapse-module
# interval: 1d
# readOnly: true
extraVolumeMounts: []
# - name: spamcheck
# mountPath: /usr/local/lib/python3.7/site-packages/company
## Extra commands to run when starting Synapse
##
extraCommands: []
# - 'apt-get update -yqq && apt-get install patch -yqq'
# - 'patch -d/usr/local/lib/python3.7/site-packages/synapse -p2 < /synapse/patches/something.patch'
## Configuration for the pod security policy, Synapse will by always run as
## its own user, even if not set.
## Note that changing this may also require you to use the volumePermission
## helper depending on your storage.
##
## NB; The synapse install is currently unable to run as anything but UID:GID
## 666:666.
##
podSecurityContext: {}
# fsGroup: 666
# runAsGroup: 666
# runAsUser: 666
## Configuration for the container security policy, refer to the above
## podSecurityContext for more relevant information.
##
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 666
## Resources to apply to the main Synapse pod.
##
resources: {}
# limits:
# cpu: 1000m
# memory: 2500Mi
# requests:
# cpu: 1000m
# memory: 2500Mi
## Liveness probe configuration to use
##
livenessProbe:
httpGet:
path: /health
port: http
## Readiness probe configuration to use
##
readinessProbe:
httpGet:
path: /health
port: http
## Startup probe configuration to use
##
startupProbe:
failureThreshold: 12
httpGet:
path: /health
port: http
## Node selectors to set for the main Synapse pod.
##
nodeSelector: {}
## Tolerations to set for the main Synapse pod.
##
tolerations: []
## Affinity to set for the main Synapse pod.
##
affinity: {}
## Configuration for handling Synapse workers, which are useful for handling
## high-load deployments.
##
## More information is available at;
## https://github.com/matrix-org/synapse/blob/master/docs/workers.md
##
workers:
default:
replicaCount: "1"
## Update strategy - only really applicable for deployments with RWO PVs attached (e.g. media repository)
## If replicas = 1, an update can get "stuck", as the previous pod remains attached to the
## PV, and the "incoming" pod can never start. Changing the strategy to "Recreate" will
## terminate the single previous pod, so that the new, incoming pod can attach to the PV
##
strategy:
type: RollingUpdate
annotations:
prometheus.io/scrape: "true"
prometheus.io/path: /_synapse/metrics
prometheus.io/port: "9090"
## Additional environment variables to add to the worker.
##
extraEnv: []
# - name: LD_PRELOAD
# value: /usr/lib/x86_64-linux-gnu/libjemalloc.so.2
# - name: SYNAPSE_CACHE_FACTOR
# value: "1.0"
## Additional volumes to add to the worker.
## Useful for the media repo, or for adding Python modules.
##
volumes: []
volumeMounts: []
## Extra commands to run when starting Synapse
##
extraCommands: []
# - 'apt-get update -yqq && apt-get install patch -yqq'
# - 'patch -d/usr/local/lib/python3.7/site-packages/synapse -p2 < /synapse/patches/something.patch'
## Security context information to set to the worker.
##
podSecurityContext: {}
# fsGroup: 666
# runAsGroup: 666
# runAsUser: 666
## Container security context information to set to the worker.
##
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 666
## Resources to apply to the worker.
##
resources:
requests:
cpu: 10m
memory: 64Mi
limits:
cpu: 200m
memory: 256Mi
## Liveness probe configuration to use
##
livenessProbe:
httpGet:
path: /health
port: metrics
## Readiness probe configuration to use
##
readinessProbe:
httpGet:
path: /health
port: metrics
## Readiness probe configuration to use
## Defaults to allowing workers 60 seconds to start up
##
startupProbe:
failureThreshold: 6
httpGet:
path: /health
port: metrics
## Node selector configuration to set on the worker.
##
nodeSelector: {}
## Toleration configuration to set on the worker.
##
tolerations: []
## Affinity configuration to set on the worker.
##
affinity: {}
## The generic worker can be used to handle most endpoints.
## Be careful when enabling the sync endpoints as they can eat large amounts of
## resources. Refer to the information URL above for more info.
## Proper load balancing with the K8s Ingress resource may not be possible.
##
generic_worker:
enabled: true
generic: true
listeners: [client, federation, keys]
csPaths:
## Sync requests
- "/_matrix/client/(r0|v3)/sync$"
- "/_matrix/client/(api/v1|r0|v3)/events$"
- "/_matrix/client/(api/v1|r0|v3)/initialSync$"
- "/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync$"
## Client API requests
- "/_matrix/client/(api/v1|r0|v3|unstable)/createRoom$"
- "/_matrix/client/(api/v1|r0|v3|unstable)/publicRooms$"
- "/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/joined_members$"
- "/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/context/"
- "/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/members$"
- "/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state$"
- "/_matrix/client/v1/rooms/.*/hierarchy$"
- "/_matrix/client/unstable/org.matrix.msc2716/rooms/.*/batch_send$"
- "/_matrix/client/unstable/im.nheko.summary/rooms/.*/summary$"
- "/_matrix/client/(r0|v3|unstable)/account/3pid$"
- "/_matrix/client/(r0|v3|unstable)/account/whoami$"
- "/_matrix/client/(r0|v3|unstable)/devices$"
- "/_matrix/client/versions$"
- "/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$"
- "/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event/"
- "/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms$"
- "/_matrix/client/(api/v1|r0|v3|unstable)/search$"
## Encryption requests
- "/_matrix/client/(r0|v3|unstable)/keys/query$"
- "/_matrix/client/(r0|v3|unstable)/keys/changes$"
- "/_matrix/client/(r0|v3|unstable)/keys/claim$"
- "/_matrix/client/(r0|v3|unstable)/room_keys/"
- "/_matrix/client/(api/v1|r0|v3|unstable)/keys/upload"
## Registration/login requests
- "/_matrix/client/(api/v1|r0|v3|unstable)/login$"
- "/_matrix/client/(r0|v3|unstable)/register$"
- "/_matrix/client/v1/register/m.login.registration_token/validity$"
## Event sending requests
- "/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/redact"
- "/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/send"
- "/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state/"
- "/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$"
- "/_matrix/client/(api/v1|r0|v3|unstable)/join/"
- "/_matrix/client/(api/v1|r0|v3|unstable)/profile/"
## User directory search requests
- "/_matrix/client/(r0|v3|unstable)/user_directory/search"
## Worker event streams
## See https://matrix-org.github.io/synapse/latest/workers.html#stream-writers
##
## The typing event stream
- "/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/typing"
## The to_device event stream
- "/_matrix/client/(r0|v3|unstable)/sendToDevice/"
## The account_data event stream
- "/_matrix/client/(r0|v3|unstable)/.*/tags"
- "/_matrix/client/(r0|v3|unstable)/.*/account_data"
## The receipts event stream
- "/_matrix/client/(r0|v3|unstable)/rooms/.*/receipt"
- "/_matrix/client/(r0|v3|unstable)/rooms/.*/read_markers"
## The presence event stream
- "/_matrix/client/(api/v1|r0|v3|unstable)/presence/"
paths:
## Federation requests
- "/_matrix/federation/v1/event/"
- "/_matrix/federation/v1/state/"
- "/_matrix/federation/v1/state_ids/"
- "/_matrix/federation/v1/backfill/"
- "/_matrix/federation/v1/get_missing_events/"
- "/_matrix/federation/v1/publicRooms"
- "/_matrix/federation/v1/query/"
- "/_matrix/federation/v1/make_join/"
- "/_matrix/federation/v1/make_leave/"
- "/_matrix/federation/(v1|v2)/send_join/"
- "/_matrix/federation/(v1|v2)/send_leave/"
- "/_matrix/federation/(v1|v2)/invite/"
- "/_matrix/federation/v1/event_auth/"
- "/_matrix/federation/v1/exchange_third_party_invite/"
- "/_matrix/federation/v1/user/devices/"
- "/_matrix/key/v2/query"
- "/_matrix/federation/v1/hierarchy/"
## Inbound federation transaction request
- "/_matrix/federation/v1/send/"
## To separate the generic worker into specific concerns - for example federation transaction receiving;
## NB; This worker should have incoming traffic routed based on source IP, which is
## left as an exercise to the reader.
## https://github.com/matrix-org/synapse/blob/develop/docs/workers.md#load-balancing
# federation_reader:
# enabled: true
# generic: true
# listeners: [federation]
# paths:
# - "/_matrix/federation/v1/send/"
## Or /sync handling.
## NB; Care should be taken to route users to the same instance when scaling this worker,
## this is left as an exercise to the reader.
## https://github.com/matrix-org/synapse/blob/develop/docs/workers.md#load-balancing
# synchrotron:
# enabled: true
# generic: true
# listeners: [client]
# csPaths:
# - "/_matrix/client/(v2_alpha|r0|v3)/sync$"
# - "/_matrix/client/(api/v1|v2_alpha|r0|v3)/events$"
# - "/_matrix/client/(api/v1|r0|v3)/initialSync$"
# - "/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync$"
## Specialized - non-generic workers below;
## This worker deals with pushing notifications.
## NB; Only one instance of this worker can be run at a time, refer to the
## information URL above.
##
pusher:
enabled: false
## This worker handles sending data to registered appservices.
## NB; Only one instance of this worker can be run at a time, refer to the
## information URL above.
##
appservice:
enabled: false
generic: true
name: appservices
## This worker handles sending federation traffic to other Synapse servers.
##
federation_sender:
enabled: false
## Specialized workers can also be run as multiple separate instances,
## make sure to read the relevant documentation.
##
# federation_sender_other:
# app: federation_sender
# enabled: false
## This worker deals with serving and storing media.
## NB; Running multiple instances will conflict with background jobs.
##
media_repository:
enabled: false
listeners: [media]
csPaths:
- "/_matrix/media/.*"
- "/_synapse/admin/v1/purge_media_cache$"
- "/_synapse/admin/v1/room/.*/media"
- "/_synapse/admin/v1/user/.*/media"
- "/_synapse/admin/v1/media/"
- "/_synapse/admin/v1/quarantine_media/"
- "/_synapse/admin/v1/users/.*/media$"
paths:
- "/_matrix/media/.*"
## This worker deals with user directory searches.
##
user_dir:
enabled: false
name: userdir
listeners: [client]
csPaths:
- "/_matrix/client/(api/v1|r0|v3|unstable)/user_directory/search$"
## This worker handles key uploads, and may also stub out presence if that is
## disabled. If you set extraConfig.use_presence=false then you may want to
## uncomment the second path.
##
frontend_proxy:
enabled: false
listeners: [client]
csPaths:
- "/_matrix/client/(api/v1|r0|v3|unstable)/keys/upload"
# - "/_matrix/client/(api/v1|r0|v3|unstable)/presence/[^/]+/status"
wellknown:
enabled: false
postgresql:
enabled: false
externalPostgresql:
port: 5432
username: synapse
existingSecret: synapse-secret
existingSecretPasswordKey: SYNAPSE_DATABASE_PASS
database: synapse
extraArgs: {}
redis:
enabled: false
externalRedis:
host: dragonfly.databases.svc.cluster.local
port: 6379
dbid: 7
## Persistence configuration for the media repository function.
## This PVC will be mounted in either Synapse or a media_repo worker.
##
## NB; If you want to be able to scale this, you will have to set the
## accessMode to RWX/ReadWriteMany.
##
persistence:
enabled: true
existingClaim: synapse
# storageClass: "-"
# accessMode: ReadWriteOnce
# size: 30Gi
## Set up an init container to chown the mounted media if necessary.
##
volumePermissions:
enabled: false
uid: 666
gid: 666
image:
repository: alpine
tag: latest
pullPolicy: Always
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
resources: {}
# resources:
# requests:
# memory: 128Mi
# cpu: 100m
## Configuration for the main Synapse service.
## To use TLS inside Synapse itself, add an TLS listener, and change the ports
## configured in here.
##
service:
type: ClusterIP
port: 8008
targetPort: http
## The K8s ingress configuration, this will be quite heavily used in order to
## set up all routing necessary for use with a sharded Synapse instance.
## If you're not using a Ingress compatible K8s ingress, you will need to set up
## your own routing instead.
##
ingress:
enabled: false
## Generate traefik-compatible regex paths instead of nginx-compatible ones.
##
traefikPaths: false
## Annotations to apply to the created ingress resource.
##
annotations: {}
# nginx.ingress.kubernetes.io/use-regex: "true"
# # Sync proxy-body-size with Synapse's max_upload_size which is 10M by default
# nginx.ingress.kubernetes.io/proxy-body-size: 10m
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
## Hosts to add to the ingress configuration for handling Client-to-Server
## API request paths.
##
## NB; config.serverName is included if includeServerName is set. (default)
##
csHosts: []
# - matrix.example.com
## Additional hosts to add to the ingress configuration for handling
## Server-to-Server API requests.
##
## NB; config.serverName is included if includeServerName is set. (default)
##
hosts: []
# - example.com
## Additional hosts to add to the ingress configuration for handling
## well-known requests.
##
## NB; config.serverName is included if includeServerName is set. (default)
##
wkHosts: []
# - example.com
## Additional paths to add to the Server-to-Server ingress blocks, will be
## inserted before the /_matrix catch-all path.
##
paths: []
# # K8s 1.19+
# - path: /_matrix/media
# pathType: Prefix
# backend:
# service:
# name: matrix-media-repo
# port: 8000
# # K8s <1.19
# - path: /_matrix/media
# backend:
# serviceName: matrix-media-repo
# servicePort: 8000
## Additional paths to add to the Client-to-Server ingress blocks, will be
## inserted before the /_matrix and /_synapse catch-all paths.
##
csPaths: []
# # K8s 1.19+
# - path: /_matrix/media
# pathType: Prefix
# backend:
# service:
# name: matrix-media-repo
# port:
# number: 8000
# # K8s <1.19
# - path: /_matrix/media
# backend:
# serviceName: matrix-media-repo
# servicePort: 8000
## Should the /_synapse path be included in the ingress, admin APIs are
## provided under this path.
##
includeUnderscoreSynapse: true
## Should config.serverName be included in the list of ingress paths, can be
## set to false if the main domain is managed in some external way.
##
includeServerName: true
## TLS configuration to include in the ingress configuration
##
tls: []
# - secretName: chart-example-tls
# hosts:
# - example.com
# - matrix.example.com
## Set the name of the IngressClass cluster resource (optional)
## https://kubernetes.io/docs/reference/kubernetes-api/service-resources/ingress-v1/#IngressSpec
# className: can-be-anything
## Specifies whether a service account should be created, and annotations to add.
##
# serviceAccount:
# create: false
# annotations: {}
# eks.amazonaws.com/role-arn: arn:aws:iam::000000000000:role/matrix-synapse
# name: non-default-service-name

View file

@ -4,7 +4,7 @@ description: |
An IP address management (IPAM) and data center infrastructure management (DCIM) tool.
appVersion: v3.7.1
type: application
version: 6.0.9
version: 6.1.0
maintainers:
- name: Tommy Skaug
email: tommy@skaug.me

View file

@ -0,0 +1,137 @@
replicaCount: 1
initContainers:
dbInit:
image:
repository: ghcr.io/onedr0p/postgres-init
tag: "16"
envFrom:
- secretRef:
name: netbox-secret
image:
repository: quay.io/netboxcommunity/netbox
tag: v3.7.5
pullPolicy: Always
nameOverride: ""
fullnameOverride: ""
service:
type: ClusterIP
port: 80
existingSecret: netbox-secret
extraEnv: {}
# EMAIL_SERVER: localhost
# EMAIL_PORT: 25
# EMAIL_USERNAME: netbox
# EMAIL_TIMEOUT: 5
# EMAIL_FROM: netbox@example.com
# MAX_PAGE_SIZE: 1000
# WEBHOOKS_ENABLED: true
extraEnvFrom: []
# - configMapRef:
# name: custom-config
# - secretRef:
# name: custom-secrets
extraSecrets: {}
# EMAIL_PASSWORD: netbox
# Ends up stored as extra.py in the netbox configuration, must be valid Python
extraConfiguration: ''
# Will be stored in plugins.py in the netbox configuration, requires
# using/building a netbox image containing the required plugins, must be valid
# Python
#
# https://github.com/netbox-community/netbox-docker/wiki/Using-Netbox-Plugins
extraPlugins: ''
# PLUGINS = ["netbox_bgp"]
#
# PLUGINS_CONFIG = {
# "netbox_bgp": {
# ADD YOUR SETTINGS HERE
# }
# }
resources: {}
# limits:
# cpu: 500m
# memory: 512Mi
# requests:
# cpu: 500m
# memory: 512Mi
nodeSelector: {}
tolerations: []
affinity: {}
extraVolumes: []
extraVolumeMounts: []
readinessProbe:
httpGet:
port: http
path: /
livenessProbe:
httpGet:
port: http
path: /
startupProbe:
tcpSocket:
port: http
failureThreshold: 12
persistence:
enabled: false
# existingClaim: netbox-data
# storageClass: "-"
accessMode: ReadWriteOnce
size: 10Gi
worker:
enabled: true
replicaCount: 1
resources: {}
# limits:
# cpu: 100m
# memory: 150Mi
# requests:
# cpu: 100m
# memory: 150Mi
nodeSelector: {}
tolerations: []
affinity: {}
# Handles the Netbox 3.0+ housekeeping pod
housekeeping:
enabled: true
resources: {}
# limits:
# cpu: 100m
# memory: 32Mi
# requests:
# cpu: 100m
# memory: 32Mi
nodeSelector: {}
tolerations: []
affinity: {}

View file

@ -4,7 +4,7 @@ description: |
ntfy lets you send push notifications to your phone or desktop via scripts from any computer
appVersion: v2.10.0
type: application
version: 0.1.5
version: 0.2.0
maintainers:
- name: Tommy Skaug
email: tommy@skaug.me

View file

@ -0,0 +1,9 @@
config:
baseUrl: https://ntfy.sh
persistence:
existingClaim: ntfy-sh-server
image:
repository: binwiederhier/ntfy
tag: v2.10.0

View file

@ -2,7 +2,7 @@ apiVersion: v2
name: ollama-web
description: ChatGPT-Style Web UI Client for Ollama 🦙
type: application
version: 0.1.0
version: 0.2.0
appVersion: "main"
icon: https://avatars.githubusercontent.com/u/147204191?s=200&v=4
maintainers:

View file

@ -0,0 +1,17 @@
replicaCount: 1
image:
repository: ghcr.io/ollama-webui/ollama-webui
tag: "main"
apiUrl: "http://ollama.llm.svc.cluster.local:11434/api"
servicePort: 8080
resources:
limits:
cpu: "500m"
memory: "500Mi"
volumeSize: 1Gi
nodeSelector: {}
tolerations: []
service:
type: NodePort
persistence:
existingClaim: ollama-web

View file

@ -4,7 +4,7 @@ description: |
document management system that transforms your physical documents into a searchable online archive
appVersion: 2.7.0
type: application
version: 0.1.8
version: 0.2.0
maintainers:
- name: Tommy Skaug
email: tommy@skaug.me

View file

@ -0,0 +1,35 @@
config:
externalUrl: https://paperless.example.com/
timezone: America/Los_Angeles
ocrLanguage: eng
textRecognitionLanguages: eng nor
persistence:
mediaExistingClaim: paperless-ngx-media
dataExistingClaim: paperless-ngx-data
initContainers:
dbInit:
image:
repository: ghcr.io/onedr0p/postgres-init
tag: "16"
envFrom:
- secretRef:
name: paperless-ngx-secret
envFromSecret: paperless-ngx-secret
image:
repository: ghcr.io/paperless-ngx/paperless-ngx
tag: 2.7.2
postgres:
secretName: paperless-ngx-secret
resources:
limits:
memory: "3Gi"
cpu: "1000m"
# requests:
# cpu: 100m
# memory: 250Mi

View file

@ -5,7 +5,7 @@ icon: https://avatars.githubusercontent.com/u/8418310?s=200&v=4
appVersion: "0.99.15"
type: application
version: 1.0.1
version: 1.1.0
maintainers:
- name: Tommy Skaug
email: tommy@skaug.me

View file

@ -0,0 +1,63 @@
homeserver: "https://synapse.example.com"
serverAddress: "https://slidingsync.example.com"
existingSecret: "slidingsync-secret"
initContainers:
dbInit:
image:
repository: ghcr.io/onedr0p/postgres-init
tag: "16"
envFrom:
- secretRef:
name: slidingsync-secret
image:
repository: ghcr.io/matrix-org/sliding-sync
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
replicaCount: 1
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
podAnnotations: {}
podSecurityContext:
fsGroup: 2000
securityContext:
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
service:
type: ClusterIP
port: 80
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
nodeSelector: {}
tolerations: []
affinity: {}