---
global:
  image:
    # -- If set, it will override all registry entries
    registry:
    # -- If set, it will override all pullPolicy values
    pullPolicy:

image:
  registry: ghcr.io
  repository: element-hq/synapse
  # -- Overrides the image tag whose default is the chart appVersion.
  tag:
  pullPolicy: IfNotPresent

  ## Optionally specify an array of imagePullSecrets.
  ## Secrets must be manually created in the namespace.
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  ##
  # pullSecrets:
  #   - myRegistryKeySecretName

# -- Override part of the installed name, will still keep the release name.
nameOverride:
# -- Override the full name of the installed chart.
fullnameOverride:

# -- The Matrix domain name; this is what will be used for the domain part of your MXIDs.
serverName:
# -- The public Matrix server name; this will be used for any public URLs in config as well as for client API links in the ingress.
publicServerName:

## The source of the signing key used by Synapse in federation.
##
signingkey:
  # -- Annotations to apply to the job and rbac for signing-key.
  annotations:
    "helm.sh/hook": "pre-install,pre-upgrade"
    "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded"

  ## Enable a Kubernetes job to generate and store a signing key if one does not
  ## exist.
  ## If you have already run a Matrix server at some point on your domain then
  ## you will want to keep the old signing key, either by using the `existingSecret`
  ## configuration, or by including the old key under `extraConfig.old_signing_keys`
  ##
  ## If you lose your signing key then any federation traffic from your instance
  ## might no longer be trusted by the wider network.
  ##
  ## After the initial install, the job should be disabled to speed up subsequent upgrades.
  ##
  job:
    enabled: true
    publishImage:
      registry: docker.io
      repository: bitnami/kubectl
      tag: latest
      pullPolicy: IfNotPresent

  ## Specify an existing signing key secret, will need to be created in advance.
  ##
  # existingSecret: secret-name
  # existingSecretKey: signing.key

  ## Resources to apply to the signing key generation job
  ##
  resources: {}
  # limits:
  #   cpu: 100m
  #   memory: 250Mi
  # requests:
  #   cpu: 100m
  #   memory: 250Mi

## Matrix configuration values that affect other parts of the chart; for any
## value not handled by this block, you will want to instead set it in
## extraConfig below.
## Ref: https://github.com/matrix-org/synapse/blob/develop/docs/sample_config.yaml
##
config:
  ## The publicly accessible URL for the Synapse instance, will default to
  ## https://<serverName>.
  ##
  # publicBaseurl: 'https://matrix.example.com'

  ## The log level for Synapse and all modules.
  ##
  # logLevel: INFO

  ## Should usage stats be reported
  ##
  reportStats: false

  ## Protect registration with recaptcha. (optional)
  ##
  # recaptcha:
  #   publicKey: ''
  #   privateKey: ''

  ## URIs and secret key for TURN servers to use to help establish 1:1 WebRTC
  ## calls.
  ##
  # turnUris: []
  # turnSecret: ''

  ## Registration configuration; note that registration with the
  ## container-internal register_new_matrix_user tool is always possible.
  ##
  # enableRegistration: false
  ## NB; this value will default to a random string if not specified.
  # registrationSharedSecret: ''
  ## NB; Strongly recommended to set this to a secure value.
  # macaroonSecretKey: ''
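
  ## Example (illustrative) values for the registration options above; the
  ## secret strings are placeholders, generate your own long random values.
  # enableRegistration: true
  # registrationSharedSecret: 'replace-with-a-long-random-string'
  # macaroonSecretKey: 'replace-with-a-different-long-random-string'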
  # -- A set of trusted servers to contact if another server doesn't respond to a signing key request.
  trustedKeyServers:
    - server_name: matrix.org
    # verify_keys:
    #   "ed25519:auto": "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"

  # -- The bind addresses to use for the default listeners
  bindAddresses:
    - '::'

  ## Extra listeners to configure.
  ##
  extraListeners: []
  # - port: 9000
  #   bind_addresses: ['::']
  #   type: manhole

  ## Extra listener types to add onto the main listener.
  ##
  extraMainListenerTypes: []
  # - consent

  ## Logging
  # use TerseJsonFormatter structured logging
  # Ref: https://matrix-org.github.io/synapse/latest/structured_logging.html
  useStructuredLogging: false

## Specify arbitrary Synapse configuration here;
## Ref: https://github.com/matrix-org/synapse/blob/develop/docs/sample_config.yaml
##
extraConfig: {}
# old_signing_keys:
#   "ed25519:id": { key: "base64string", expired_ts: 123456789123 }
# use_presence: false
# enable_search: false
# federation_domain_whitelist:
#   - lon.example.com
#   - nyc.example.com
#   - syd.example.com
# dynamic_thumbnails: true

## Specify additional logger configuration here;
## Ref: https://matrix-org.github.io/synapse/latest/structured_logging.html
extraLoggers: {}
# synapse.storage.SQL:
#   level: WARNING

## Specify arbitrary - secret - Synapse configuration here;
## These values will be stored in secrets instead of configmaps
## Ref: https://github.com/matrix-org/synapse/blob/develop/docs/sample_config.yaml
##
extraSecrets: {}
# password_config:
#   pepper: ''

## Configuration to apply to the main Synapse pod.
##
synapse:
  ## Only really applicable when the deployment has an RWO PV attached (e.g. when the media repository
  ## is enabled for the main Synapse pod).
  ## Since replicas = 1, an update can get "stuck", as the previous pod remains attached to the
  ## PV, and the "incoming" pod can never start. Changing the strategy to "Recreate" will
  ## terminate the single previous pod, so that the new, incoming pod can attach to the PV.
  ##
  strategy:
    type: RollingUpdate

  ## Annotations to apply to the main Synapse pod.
  ##
  annotations: {}
  # prometheus.io/scrape: "true"
  # prometheus.io/path: "/_synapse/metrics"
  # prometheus.io/port: "9090"

  ## Labels to apply to the main Synapse pod.
  ##
  labels: {}

  ## Additional environment variables to apply to the main Synapse pod
  ##
  extraEnv: []
  # - name: LD_PRELOAD
  #   value: /usr/lib/x86_64-linux-gnu/libjemalloc.so.2
  # - name: SYNAPSE_CACHE_FACTOR
  #   value: "2"

  ## Additional volumes to mount into Synapse
  ##
  extraVolumes: []
  # - name: spamcheck
  #   flexVolume:
  #     driver: ananace/git-live
  #     options:
  #       repo: https://github.com/company/synapse-module
  #       interval: 1d
  #     readOnly: true

  extraVolumeMounts: []
  # - name: spamcheck
  #   mountPath: /usr/local/lib/python3.7/site-packages/company

  ## Extra commands to run when starting Synapse
  ##
  extraCommands: []
  # - 'apt-get update -yqq && apt-get install patch -yqq'
  # - 'patch -d/usr/local/lib/python3.7/site-packages/synapse -p2 < /synapse/patches/something.patch'

  ## Configuration for the pod security policy; Synapse will always be run as
  ## its own user, even if this is not set.
  ## Note that changing this may also require you to use the volumePermissions
  ## helper depending on your storage.
  ##
  ## NB; The Synapse install is currently unable to run as anything but UID:GID
  ## 666:666.
  ##
  podSecurityContext: {}
  # fsGroup: 666
  # runAsGroup: 666
  # runAsUser: 666
  ## Configuration for the container security policy, refer to the above
  ## podSecurityContext for more relevant information.
  ##
  securityContext: {}
  # capabilities:
  #   drop:
  #     - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 666

  ## Resources to apply to the main Synapse pod.
  ##
  resources: {}
  # limits:
  #   cpu: 1000m
  #   memory: 2500Mi
  # requests:
  #   cpu: 1000m
  #   memory: 2500Mi

  ## Liveness probe configuration to use
  ##
  livenessProbe:
    httpGet:
      path: /health
      port: http

  ## Readiness probe configuration to use
  ##
  readinessProbe:
    httpGet:
      path: /health
      port: http

  ## Startup probe configuration to use
  ##
  startupProbe:
    failureThreshold: 12
    httpGet:
      path: /health
      port: http

  ## Node selectors to set for the main Synapse pod.
  ##
  nodeSelector: {}

  ## Tolerations to set for the main Synapse pod.
  ##
  tolerations: []

  ## Affinity to set for the main Synapse pod.
  ##
  affinity: {}

## Configuration for handling Synapse workers, which are useful for handling
## high-load deployments.
##
## More information is available at;
## https://github.com/matrix-org/synapse/blob/master/docs/workers.md
##
workers:
  ## Default configuration, this is inherited into all workers, and can also be
  ## overridden on each worker type.
  ##
  default:
    ## The number of worker replicas, note that some workers require special
    ## handling. Refer to the information URL above.
    ##
    replicaCount: 1

    ## Update strategy - only really applicable for deployments with RWO PVs attached (e.g. media repository).
    ## If replicas = 1, an update can get "stuck", as the previous pod remains attached to the
    ## PV, and the "incoming" pod can never start. Changing the strategy to "Recreate" will
    ## terminate the single previous pod, so that the new, incoming pod can attach to the PV.
    ##
    strategy:
      type: RollingUpdate

    ## A specific name for this worker, can't be set globally.
    ## Note that this can only be set when replicaCount is 1
    # name:

    ## Additional configuration to set for the worker, can't be set globally.
    # extraConfig: {}

    ## Annotations to apply to the worker.
    ##
    annotations: {}
    # prometheus.io/scrape: "true"
    # prometheus.io/path: /_synapse/metrics
    # prometheus.io/port: 9090

    ## Additional environment variables to add to the worker.
    ##
    extraEnv: []
    # - name: LD_PRELOAD
    #   value: /usr/lib/x86_64-linux-gnu/libjemalloc.so.2
    # - name: SYNAPSE_CACHE_FACTOR
    #   value: "1.0"

    ## Additional volumes to add to the worker.
    ## Useful for the media repo, or for adding Python modules.
    ##
    volumes: []
    volumeMounts: []

    ## Extra commands to run when starting Synapse
    ##
    extraCommands: []
    # - 'apt-get update -yqq && apt-get install patch -yqq'
    # - 'patch -d/usr/local/lib/python3.7/site-packages/synapse -p2 < /synapse/patches/something.patch'

    ## Pod security context information to set on the worker.
    ##
    podSecurityContext: {}
    # fsGroup: 666
    # runAsGroup: 666
    # runAsUser: 666

    ## Container security context information to set on the worker.
    ##
    securityContext: {}
    # capabilities:
    #   drop:
    #     - ALL
    # readOnlyRootFilesystem: true
    # runAsNonRoot: true
    # runAsUser: 666

    ## Resources to apply to the worker.
    ##
    resources: {}
    # limits:
    #   cpu: 100m
    #   memory: 128Mi
    # requests:
    #   cpu: 100m
    #   memory: 128Mi

    ## Liveness probe configuration to use
    ##
    livenessProbe:
      httpGet:
        path: /health
        port: metrics

    ## Readiness probe configuration to use
    ##
    readinessProbe:
      httpGet:
        path: /health
        port: metrics

    ## Startup probe configuration to use
    ## Defaults to allowing workers 60 seconds to start up
    ##
    startupProbe:
      failureThreshold: 6
      httpGet:
        path: /health
        port: metrics

    ## Node selector configuration to set on the worker.
    ##
    nodeSelector: {}
    ## Toleration configuration to set on the worker.
    ##
    tolerations: []

    ## Affinity configuration to set on the worker.
    ##
    affinity: {}

  ## The generic worker can be used to handle most endpoints.
  ## Be careful when enabling the sync endpoints as they can eat large amounts of
  ## resources. Refer to the information URL above for more info.
  ## Proper load balancing with the K8s Ingress resource may not be possible.
  ##
  generic_worker:
    enabled: false
    generic: true
    listeners: [client, federation]
    csPaths:
      ## Sync requests
      # - "/_matrix/client/(r0|v3)/sync$"
      - "/_matrix/client/(api/v1|r0|v3)/events$"
      # - "/_matrix/client/(api/v1|r0|v3)/initialSync$"
      # - "/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync$"
      ## Client API requests
      - "/_matrix/client/(api/v1|r0|v3|unstable)/createRoom$"
      - "/_matrix/client/(api/v1|r0|v3|unstable)/publicRooms$"
      - "/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/joined_members$"
      - "/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/context/"
      - "/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/members$"
      - "/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state$"
      - "/_matrix/client/v1/rooms/.*/hierarchy$"
      - "/_matrix/client/unstable/org.matrix.msc2716/rooms/.*/batch_send$"
      - "/_matrix/client/unstable/im.nheko.summary/rooms/.*/summary$"
      - "/_matrix/client/(r0|v3|unstable)/account/3pid$"
      - "/_matrix/client/(r0|v3|unstable)/account/whoami$"
      - "/_matrix/client/(r0|v3|unstable)/devices$"
      - "/_matrix/client/versions$"
      - "/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$"
      - "/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event/"
      - "/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms$"
      - "/_matrix/client/(api/v1|r0|v3|unstable)/search$"
      ## Encryption requests
      - "/_matrix/client/(r0|v3|unstable)/keys/query$"
      - "/_matrix/client/(r0|v3|unstable)/keys/changes$"
      - "/_matrix/client/(r0|v3|unstable)/keys/claim$"
      - "/_matrix/client/(r0|v3|unstable)/room_keys/"
      ## Registration/login requests
      - "/_matrix/client/(api/v1|r0|v3|unstable)/login$"
      - "/_matrix/client/(r0|v3|unstable)/register$"
      - "/_matrix/client/v1/register/m.login.registration_token/validity$"
      ## Event sending requests
      - "/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/redact"
      - "/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/send"
      - "/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state/"
      - "/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$"
      - "/_matrix/client/(api/v1|r0|v3|unstable)/join/"
      - "/_matrix/client/(api/v1|r0|v3|unstable)/profile/"
      ## User directory search requests
      - "/_matrix/client/(r0|v3|unstable)/user_directory/search"

      ## Worker event streams
      ## See https://matrix-org.github.io/synapse/latest/workers.html#stream-writers
      ##
      ## The typing event stream
      # - "/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/typing"
      ## The to_device event stream
      # - "/_matrix/client/(r0|v3|unstable)/sendToDevice/"
      ## The account_data event stream
      # - "/_matrix/client/(r0|v3|unstable)/.*/tags"
      # - "/_matrix/client/(r0|v3|unstable)/.*/account_data"
      ## The receipts event stream
      # - "/_matrix/client/(r0|v3|unstable)/rooms/.*/receipt"
      # - "/_matrix/client/(r0|v3|unstable)/rooms/.*/read_markers"
      ## The presence event stream
      # - "/_matrix/client/(api/v1|r0|v3|unstable)/presence/"
    paths:
      ## Federation requests
      - "/_matrix/federation/v1/event/"
      - "/_matrix/federation/v1/state/"
      - "/_matrix/federation/v1/state_ids/"
      - "/_matrix/federation/v1/backfill/"
      - "/_matrix/federation/v1/get_missing_events/"
      - "/_matrix/federation/v1/publicRooms"
      - "/_matrix/federation/v1/query/"
      - "/_matrix/federation/v1/make_join/"
      - "/_matrix/federation/v1/make_leave/"
      - "/_matrix/federation/(v1|v2)/send_join/"
      - "/_matrix/federation/(v1|v2)/send_leave/"
      - "/_matrix/federation/(v1|v2)/invite/"
      - "/_matrix/federation/v1/event_auth/"
      - "/_matrix/federation/v1/exchange_third_party_invite/"
      - "/_matrix/federation/v1/user/devices/"
      - "/_matrix/key/v2/query"
      - "/_matrix/federation/v1/hierarchy/"
      ## Inbound federation transaction request
      - "/_matrix/federation/v1/send/"

  ## To separate the generic worker into specific concerns - for example federation transaction receiving;
  ## NB; This worker should have incoming traffic routed based on source IP, which is
  ## left as an exercise to the reader.
  ## https://github.com/matrix-org/synapse/blob/develop/docs/workers.md#load-balancing
  # federation_reader:
  #   enabled: true
  #   generic: true
  #   listeners: [federation]
  #   paths:
  #     - "/_matrix/federation/v1/send/"

  ## Or /sync handling.
  ## NB; Care should be taken to route users to the same instance when scaling this worker,
  ## this is left as an exercise to the reader.
  ## https://github.com/matrix-org/synapse/blob/develop/docs/workers.md#load-balancing
  # synchrotron:
  #   enabled: true
  #   generic: true
  #   listeners: [client]
  #   csPaths:
  #     - "/_matrix/client/(v2_alpha|r0|v3)/sync$"
  #     - "/_matrix/client/(api/v1|v2_alpha|r0|v3)/events$"
  #     - "/_matrix/client/(api/v1|r0|v3)/initialSync$"
  #     - "/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync$"

  ## Specialized - non-generic - workers below;

  ## This worker deals with pushing notifications.
  ## NB; Only one instance of this worker can be run at a time, refer to the
  ## information URL above.
  ##
  pusher:
    enabled: false

  ## This worker handles sending data to registered appservices.
  ## NB; Only one instance of this worker can be run at a time, refer to the
  ## information URL above.
  ##
  appservice:
    enabled: false
    generic: true
    name: appservices

  ## This worker handles sending federation traffic to other Synapse servers.
  ##
  federation_sender:
    enabled: false

  ## Specialized workers can also be run as multiple separate instances,
  ## make sure to read the relevant documentation.
  ##
  # federation_sender_other:
  #   app: federation_sender
  #   enabled: false

  ## This worker deals with serving and storing media.
  ## NB; Running multiple instances will conflict with background jobs.
  ##
  media_repository:
    enabled: false
    listeners: [media]
    csPaths:
      - "/_matrix/media/.*"
      - "/_synapse/admin/v1/purge_media_cache$"
      - "/_synapse/admin/v1/room/.*/media"
      - "/_synapse/admin/v1/user/.*/media"
      - "/_synapse/admin/v1/media/"
      - "/_synapse/admin/v1/quarantine_media/"
      - "/_synapse/admin/v1/users/.*/media$"
    paths:
      - "/_matrix/media/.*"

  ## This worker deals with user directory searches.
  ##
  user_dir:
    enabled: false
    name: userdir
    listeners: [client]
    csPaths:
      - "/_matrix/client/(api/v1|r0|v3|unstable)/user_directory/search$"

  ## This worker handles key uploads, and may also stub out presence if that is
  ## disabled. If you set extraConfig.use_presence=false then you may want to
  ## uncomment the second path.
  ##
  frontend_proxy:
    enabled: false
    listeners: [client]
    csPaths:
      - "/_matrix/client/(api/v1|r0|v3|unstable)/keys/upload"
      # - "/_matrix/client/(api/v1|r0|v3|unstable)/presence/[^/]+/status"
## This will set up an nginx server to respond to any
## /.well-known/matrix/server requests, to make federation possible without
## adding SRV-records to DNS.
##
wellknown:
  enabled: false

  image:
    registry: docker.io
    repository: library/nginx
    tag: 1.27.3
    pullPolicy: IfNotPresent

  replicaCount: 1

  podLabels: {}
  podAnnotations: []

  podSecurityContext: {}
  securityContext: {}

  containerPort: 80

  env: []

  resources: {}

  nodeSelector: {}
  tolerations: []
  affinity: {}

  service:
    type: ClusterIP
    port: 8080
    annotations: {}

  # -- The host and port combo to serve on .well-known/matrix/server.
  # m.server: matrix.example.com:443
  server: {}

  # -- Data to serve on .well-known/matrix/client.
  # m.homeserver:
  #   base_url: https://matrix.example.com
  client: {}

  # -- Data to serve on .well-known/element/element.json
  # call:
  #   widget_url: https://call.element.io
  element: {}

## This configuration is for setting up the internally provided Postgres server;
## if you instead want to use an existing server, then you may want to set
## enabled to false and configure the externalPostgresql block.
##
postgresql:
  enabled: true
  auth:
    # XXX Change me!
    password: synapse
    ## Or use an existing secret with a "password" key
    ## instead of a static password
    ##
    # existingSecret: postgresql-secret
    username: synapse
    database: synapse
  primary:
    initdb:
      args: "--lc-collate=C --lc-ctype=C"
    persistence:
      # storageClass: "-"
      size: 16Gi
  # -- set extra configuration
  # ref: https://github.com/matrix-org/synapse/blob/develop/docs/postgres.md#synapse-config
  # e.g. txn_limit, allow_unsafe_locale
  options: {}
  # -- Extra arguments for the database connection
  # ref: https://github.com/matrix-org/synapse/blob/develop/docs/postgres.md#synapse-config
  extraArgs: {}

## An externally configured Postgres server to use for Synapse's database, note
## that the database needs to have both COLLATE and CTYPE set to "C".
##
externalPostgresql:
  # host: postgres
  port: 5432
  username: synapse
  # password: synapse
  ## The name of an existing secret with postgresql credentials
  # existingSecret: postgres-secrets
  ## Password key to be retrieved from the existing secret
  # existingSecretPasswordKey: postgres-password
  database: synapse
  # sslmode: prefer
  # -- set extra configuration
  # ref: https://github.com/matrix-org/synapse/blob/develop/docs/postgres.md#synapse-config
  # e.g. txn_limit, allow_unsafe_locale
  options: {}
  # -- Extra arguments for the database connection
  # ref: https://github.com/matrix-org/synapse/blob/develop/docs/postgres.md#synapse-config
  extraArgs: {}

## This configuration is for the internal Redis that's deployed for use with
## workers/sharding; for an external Redis server you want to set enabled to
## false and configure the externalRedis block.
##
redis:
  enabled: true
  ## Database ID for a non-default database
  # dbid: 0
  auth:
    enabled: true
    # XXX Change me!
    password: synapse
    ## Or use an existing secret with a "redis-password" key
    ## instead of a static password
    ##
    # existingSecret: redis-secret
  architecture: standalone
  master:
    kind: Deployment
    persistence:
      ## Note that Synapse only uses Redis as a synchronization utility, so no
      ## data will ever need to be persisted.
      ##
      enabled: false
    service:
      port: 6379

## An externally configured Redis server to use for workers/sharding.
##
externalRedis:
  # host: redis
  port: 6379
  # password: synapse
  ## Database ID for a non-default database
  # dbid: 0
  ## The name of an existing secret with redis credentials
  # existingSecret: redis-secrets
  ## Password key to be retrieved from the existing secret
  # existingSecretPasswordKey: redis-password
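
## Example (illustrative) of switching both databases to externally managed
## services, as described in the blocks above; the hostnames and secret names
## below are placeholders.
# postgresql:
#   enabled: false
# externalPostgresql:
#   host: postgres.example.com
#   username: synapse
#   database: synapse
#   existingSecret: postgres-secrets
# redis:
#   enabled: false
# externalRedis:
#   host: redis.example.com
#   existingSecret: redis-secrets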
## Persistence configuration for the media repository function.
## This PVC will be mounted in either Synapse or a media_repo worker.
##
## NB; If you want to be able to scale this, you will have to set the
## accessMode to RWX/ReadWriteMany.
##
persistence:
  enabled: true
  annotations: {}

  # -- Persistent Volume Storage Class
  # If defined, storageClassName: <storageClass>
  # If set to "-", storageClassName: "", which disables dynamic provisioning
  # If undefined (the default) or set to null, no storageClassName spec is
  # set, choosing the default provisioner. (gp2 on AWS, standard on
  # GKE, AWS & OpenStack)
  storageClass:

  # -- A manually managed Persistent Volume and Claim
  # Requires persistence.enabled: true
  # If defined, the PVC must be created manually before the volume will be bound
  existingClaim:

  # -- Do not create a PVC, use a hostPath directly in the Pod instead
  hostPath:

  accessMode: ReadWriteOnce
  size: 10Gi

## Set up an init container to chown the mounted media if necessary.
##
volumePermissions:
  enabled: false
  uid: 666
  gid: 666
  image:
    registry: docker.io
    repository: library/alpine
    tag: 3.21.0
    pullPolicy: Always

  ## Optionally specify an array of imagePullSecrets.
  ## Secrets must be manually created in the namespace.
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  ##
  # pullSecrets:
  #   - myRegistryKeySecretName

  resources: {}
  # resources:
  #   requests:
  #     memory: 128Mi
  #     cpu: 100m

## Configuration for the main Synapse service.
## To use TLS inside Synapse itself, add a TLS listener, and change the ports
## configured in here.
##
service:
  type: ClusterIP
  port: 8008
  targetPort: http

## The K8s ingress configuration, this will be quite heavily used in order to
## set up all routing necessary for use with a sharded Synapse instance.
## If you're not using an Ingress-compatible K8s ingress, you will need to set up
## your own routing instead.
##
ingress:
  enabled: true

  ## Generate traefik-compatible regex paths instead of nginx-compatible ones.
  ##
  traefikPaths: false

  ## Annotations to apply to the created ingress resource.
  ##
  annotations: {}
  # nginx.ingress.kubernetes.io/use-regex: "true"
  # # Sync proxy-body-size with Synapse's max_upload_size, which is 10M by default
  # nginx.ingress.kubernetes.io/proxy-body-size: 10m
  # kubernetes.io/ingress.class: nginx
  # kubernetes.io/tls-acme: "true"

  ## Hosts to add to the ingress configuration for handling Client-to-Server
  ## API request paths.
  ##
  ## NB; config.serverName is included if includeServerName is set. (default)
  ##
  csHosts: []
  # - matrix.example.com

  ## Additional hosts to add to the ingress configuration for handling
  ## Server-to-Server API requests.
  ##
  ## NB; config.serverName is included if includeServerName is set. (default)
  ##
  hosts: []
  # - example.com

  ## Additional hosts to add to the ingress configuration for handling
  ## well-known requests.
  ##
  ## NB; config.serverName is included if includeServerName is set. (default)
  ##
  wkHosts: []
  # - example.com

  ## Additional paths to add to the Server-to-Server ingress blocks, will be
  ## inserted before the /_matrix catch-all path.
  ##
  paths: []
  # # K8s 1.19+
  # - path: /_matrix/media
  #   pathType: Prefix
  #   backend:
  #     service:
  #       name: matrix-media-repo
  #       port: 8000
  # # K8s <1.19
  # - path: /_matrix/media
  #   backend:
  #     serviceName: matrix-media-repo
  #     servicePort: 8000
  ## Additional paths to add to the Client-to-Server ingress blocks, will be
  ## inserted before the /_matrix and /_synapse catch-all paths.
  ##
  csPaths: []
  # # K8s 1.19+
  # - path: /_matrix/media
  #   pathType: Prefix
  #   backend:
  #     service:
  #       name: matrix-media-repo
  #       port:
  #         number: 8000
  # # K8s <1.19
  # - path: /_matrix/media
  #   backend:
  #     serviceName: matrix-media-repo
  #     servicePort: 8000

  ## Should the /_synapse path be included in the ingress; the admin APIs are
  ## provided under this path.
  ##
  includeUnderscoreSynapse: true

  ## Should config.serverName be included in the list of ingress paths; can be
  ## set to false if the main domain is managed in some external way.
  ##
  includeServerName: true

  ## TLS configuration to include in the ingress configuration
  ##
  tls: []
  # - secretName: chart-example-tls
  #   hosts:
  #     - example.com
  #     - matrix.example.com

  ## Set the name of the IngressClass cluster resource (optional)
  ## https://kubernetes.io/docs/reference/kubernetes-api/service-resources/ingress-v1/#IngressSpec
  # className: can-be-anything

## Specifies whether a service account should be created, and annotations to add.
##
serviceAccount:
  create: false
  annotations: {}
  # eks.amazonaws.com/role-arn: arn:aws:iam::000000000000:role/matrix-synapse
  # name: non-default-service-name

prometheus:
  podmonitor:
    enabled: false
    labels: {}

  rules:
    enabled: false
    labels: {}
    defaults:
      enabled: true
    additionalRules: []

grafana:
  dashboards:
    enabled: false
    labels:
      grafana_dashboard: "1"
    annotations: {}
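
## Example (illustrative) of enabling the bundled monitoring integration above;
## this assumes the Prometheus Operator CRDs and a Grafana dashboard sidecar
## are available in the cluster.
# prometheus:
#   podmonitor:
#     enabled: true
#   rules:
#     enabled: true
# grafana:
#   dashboards:
#     enabled: true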