feat(metallb/cloudnative-pg/CI): BREAKING CHANGE move to consume upstream helm charts (#14681)

**Description**

To ensure stability, we've decided to consume the upstream Helm charts
from their respective projects for operators specifically.
This is mostly because operators rely heavily on RBAC, CRDs and CRs, which
could cause conflicts between the RBAC/CRDs we ship and the container versions.

This PR also patches the CI for a few other operator charts to exclude them
from the chart-signing requirements.
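
For reference, the new pattern pulls each operator chart in as an aliased dependency of our wrapper chart, so RBAC/CRDs come from the upstream project instead of being re-templated by us. A minimal sketch of that pattern, mirroring the Chart.yaml changes in this diff:

```yaml
# New dependency pattern for operator charts: consume the upstream chart
# as-is and alias it, rather than shipping our own copies of its RBAC/CRDs.
dependencies:
  - name: common
    repository: https://library-charts.truecharts.org
    version: 14.3.5
  - name: cloudnative-pg
    repository: https://cloudnative-pg.github.io/charts
    version: 0.19.1
    alias: cloudnative-pg
```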

⚒️ Fixes  # <!--(issue)-->

**⚙️ Type of change**

- [ ] ⚙️ Feature/App addition
- [ ] 🪛 Bugfix
- [x] ⚠️ Breaking change (fix or feature that would cause existing
functionality to not work as expected)
- [x] 🔃 Refactor of current code

**🧪 How Has This Been Tested?**
<!--
Please describe the tests that you ran to verify your changes. Provide
instructions so we can reproduce. Please also list any relevant details
for your test configuration
-->

**📃 Notes:**
<!-- Please enter any other relevant information here -->

**✔️ Checklist:**

- [ ] ⚖️ My code follows the style guidelines of this project
- [ ] 👀 I have performed a self-review of my own code
- [ ] #️⃣ I have commented my code, particularly in hard-to-understand
areas
- [ ] 📄 I have made corresponding changes to the documentation
- [ ] ⚠️ My changes generate no new warnings
- [ ] 🧪 I have added tests to this description that prove my fix is
effective or that my feature works
- [ ] ⬆️ I increased versions for any altered app according to semantic
versioning

**App addition**

If this PR is an app addition, please make sure you have done the
following.

- [x] 🪞 I have opened a PR on
[truecharts/containers](https://github.com/truecharts/containers) adding
the container to TrueCharts mirror repo.
- [x] 🖼️ I have added an icon in the Chart's root directory called
`icon.png`

---

_Please don't blindly check all the boxes. Read them and only check
those that apply.
Those checkboxes are there for the reviewer to see what this is all about
and the status of this PR at a quick glance._
Kjeld Schouten, 2023-11-15 15:39:03 +01:00, committed by GitHub
parent 0b9c28ea82
commit 27664e7ff3
GPG Key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
18 changed files with 73 additions and 15250 deletions

View File

@@ -51,3 +51,6 @@ chart-repos:
- truecharts-deps=https://deps.truecharts.org
- jetstack=https://charts.jetstack.io
- vmwaretanzu=https://vmware-tanzu.github.io/helm-charts
- cnpg=https://cloudnative-pg.github.io/charts
- metallb=https://metallb.github.io/metallb
- prometheus-community=https://prometheus-community.github.io/helm-charts

View File

@@ -16,3 +16,6 @@ chart-repos:
- truecharts-deps=https://deps.truecharts.org
- jetstack=https://charts.jetstack.io
- vmwaretanzu=https://vmware-tanzu.github.io/helm-charts
- cnpg=https://cloudnative-pg.github.io/charts
- metallb=https://metallb.github.io/metallb
- prometheus-community=https://prometheus-community.github.io/helm-charts

View File

@@ -113,6 +113,15 @@ for idx in $(eval echo "{0..$length}"); do
elif [[ "$name" =~ "velero" ]]; then
helm dependency build "$charts_path/$train_chart/Chart.yaml" || \
helm dependency update "$charts_path/$train_chart/Chart.yaml"|| exit 1
elif [[ "$name" =~ "metallb" ]]; then
helm dependency build "$charts_path/$train_chart/Chart.yaml" || \
helm dependency update "$charts_path/$train_chart/Chart.yaml"|| exit 1
elif [[ "$name" =~ "cloudnative-pg" ]]; then
helm dependency build "$charts_path/$train_chart/Chart.yaml" || \
helm dependency update "$charts_path/$train_chart/Chart.yaml"|| exit 1
elif [[ "$name" =~ "kube-prometheus-stack" ]]; then
helm dependency build "$charts_path/$train_chart/Chart.yaml" || \
helm dependency update "$charts_path/$train_chart/Chart.yaml"|| exit 1
elif [[ "$name" =~ "cert-manager" ]]; then
helm dependency build "$charts_path/$train_chart/Chart.yaml" --verify --keyring $gpg_dir/certman.gpg || \
helm dependency update "$charts_path/$train_chart/Chart.yaml" --verify --keyring $gpg_dir/certman.gpg || exit 1
@@ -129,6 +138,12 @@ for idx in $(eval echo "{0..$length}"); do
helm verify $cache_path/$repo_dir/$name-$version.tgz --keyring $gpg_dir/certman.gpg || exit 1
elif [[ "$name" =~ "velero" ]]; then
echo "Velero is not signed..."
elif [[ "$name" =~ "metallb" ]]; then
echo "metallb is not signed..."
elif [[ "$name" =~ "cloudnative-pg" ]]; then
echo "cloudnative-pg is not signed..."
elif [[ "$name" =~ "kube-prometheus-stack" ]]; then
echo "kube-prometheus-stack is not signed..."
elif [[ ! "$train_chart" =~ incubator\/.* ]]; then
echo "Validating dependency signature..."
helm verify $cache_path/$repo_dir/$name-$version.tgz --keyring $gpg_dir/pubring.gpg || \
@@ -145,6 +160,15 @@ for idx in $(eval echo "{0..$length}"); do
elif [[ "$name" =~ "velero" ]]; then
helm dependency build "$charts_path/$train_chart/Chart.yaml" || \
helm dependency update "$charts_path/$train_chart/Chart.yaml"|| exit 1
elif [[ "$name" =~ "metallb" ]]; then
helm dependency build "$charts_path/$train_chart/Chart.yaml" || \
helm dependency update "$charts_path/$train_chart/Chart.yaml"|| exit 1
elif [[ "$name" =~ "cloudnative-pg" ]]; then
helm dependency build "$charts_path/$train_chart/Chart.yaml" || \
helm dependency update "$charts_path/$train_chart/Chart.yaml"|| exit 1
elif [[ "$name" =~ "kube-prometheus-stack" ]]; then
helm dependency build "$charts_path/$train_chart/Chart.yaml" || \
helm dependency update "$charts_path/$train_chart/Chart.yaml"|| exit 1
elif [[ "$name" =~ "cert-manager" ]]; then
helm dependency build "$charts_path/$train_chart/Chart.yaml" --verify --keyring $gpg_dir/certman.gpg || \
helm dependency update "$charts_path/$train_chart/Chart.yaml" --verify --keyring $gpg_dir/certman.gpg || exit 1
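
The hunks above add metallb, cloudnative-pg and kube-prometheus-stack to the charts whose dependencies are built without `helm verify`, since those upstream projects don't sign their releases. A possible follow-up (not part of this change) would be to drive these exceptions from a single list instead of one `elif` branch per chart; a minimal Bash sketch, assuming the same `$name` variable used by the script above:

```bash
# Hypothetical refactor sketch, not part of this PR: keep unsigned upstream
# charts in one list instead of repeating an elif branch per chart.
unsigned_charts=("velero" "metallb" "cloudnative-pg" "kube-prometheus-stack")

is_unsigned() {
  local chart="$1"
  for pattern in "${unsigned_charts[@]}"; do
    [[ "$chart" =~ $pattern ]] && return 0
  done
  return 1
}

if is_unsigned "$name"; then
  echo "$name is not signed, skipping signature validation..."
fi
```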

View File

@@ -58,6 +58,10 @@ jobs:
helm repo add truecharts-deps https://deps.truecharts.org
helm repo add truecharts-library https://library-charts.truecharts.org
helm repo add jetstack https://charts.jetstack.io
helm repo add vmwaretanzu https://vmware-tanzu.github.io/helm-charts
helm repo add cnpg https://cloudnative-pg.github.io/charts
helm repo add metallb https://metallb.github.io/metallb
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm repo update
- name: Collect changes (branch-based)

View File

@@ -74,6 +74,9 @@ jobs:
helm repo add truecharts-library https://library-charts.truecharts.org
helm repo add truecharts-deps https://deps.truecharts.org
helm repo add jetstack https://charts.jetstack.io
helm repo add vmwaretanzu https://vmware-tanzu.github.io/helm-charts
helm repo add cnpg https://cloudnative-pg.github.io/charts
helm repo add metallb https://metallb.github.io/metallb
helm repo update
# Optional step if GPG signing is used

View File

@@ -151,6 +151,9 @@ jobs:
helm repo add truecharts-library https://library-charts.truecharts.org
helm repo add jetstack https://charts.jetstack.io
helm repo add vmwaretanzu https://vmware-tanzu.github.io/helm-charts
helm repo add cnpg https://cloudnative-pg.github.io/charts
helm repo add metallb https://metallb.github.io/metallb
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm repo update
- name: Add Dependencies

View File

@@ -247,6 +247,9 @@ jobs:
helm repo add truecharts-deps https://deps.truecharts.org
helm repo add jetstack https://charts.jetstack.io
helm repo add vmwaretanzu https://vmware-tanzu.github.io/helm-charts
helm repo add cnpg https://cloudnative-pg.github.io/charts
helm repo add metallb https://metallb.github.io/metallb
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm repo update
- name: Checkout

View File

@@ -12,6 +12,10 @@ dependencies:
- name: common
repository: https://library-charts.truecharts.org
version: 14.3.5
- name: cloudnative-pg
repository: https://cloudnative-pg.github.io/charts
version: 0.19.1
alias: cloudnative-pg
kubeVersion: ">=1.16.0-0"
maintainers:
- email: info@truecharts.org
@@ -23,7 +27,7 @@ sources:
- https://github.com/cloudnative-pg
- https://cloudnative-pg.io/
type: application
version: 2.0.12
version: 3.0.0
annotations:
truecharts.org/category: operators
truecharts.org/SCALE-support: "true"
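
Since the operator is now an aliased dependency, any settings we still expose are passed through to it under that alias in the wrapper chart's values.yaml. A minimal, hypothetical sketch (the `crds.create` key is assumed from the upstream cloudnative-pg chart and is not part of this diff):

```yaml
# Hypothetical pass-through example: values nested under the dependency alias
# are handed to the upstream cloudnative-pg chart as its own values.
cloudnative-pg:
  crds:
    create: true  # assumed upstream key; check the upstream chart's values.yaml
```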

View File

@@ -1,85 +0,0 @@
{{- define "cnpg.webhooks.mutating" -}}
{{- if .Values.webhook.mutating.create }}
{{- $cnpgLabels := .Values.webhook.validating.labels -}}
{{- $cnpgAnnotations := .Values.webhook.validating.annotations -}}
{{- $labels := (mustMerge ($cnpgLabels | default dict) (include "tc.v1.common.lib.metadata.allLabels" $ | fromYaml)) }}
{{- $annotations := (mustMerge ($cnpgAnnotations | default dict) (include "tc.v1.common.lib.metadata.allAnnotations" $ | fromYaml)) }}
---
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
name: cnpg-mutating-webhook-configuration
{{- with (include "tc.v1.common.lib.metadata.render" (dict "rootCtx" $ "labels" $labels) | trim) }}
labels:
{{- . | nindent 4 }}
{{- end }}
{{- with (include "tc.v1.common.lib.metadata.render" (dict "rootCtx" $ "annotations" $annotations) | trim) }}
annotations:
{{- . | nindent 4 }}
{{- end }}
webhooks:
- admissionReviewVersions:
- v1
clientConfig:
service:
name: cnpg-webhook-service
namespace: {{ .Release.Namespace }}
path: /mutate-postgresql-cnpg-io-v1-backup
port: 443
failurePolicy: {{ .Values.webhook.mutating.failurePolicy }}
name: mbackup.cnpg.io
rules:
- apiGroups:
- postgresql.cnpg.io
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- backups
sideEffects: None
- admissionReviewVersions:
- v1
clientConfig:
service:
name: cnpg-webhook-service
namespace: {{ .Release.Namespace }}
path: /mutate-postgresql-cnpg-io-v1-cluster
port: 443
failurePolicy: {{ .Values.webhook.mutating.failurePolicy }}
name: mcluster.cnpg.io
rules:
- apiGroups:
- postgresql.cnpg.io
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- clusters
sideEffects: None
- admissionReviewVersions:
- v1
clientConfig:
service:
name: cnpg-webhook-service
namespace: {{ .Release.Namespace }}
path: /mutate-postgresql-cnpg-io-v1-scheduledbackup
port: 443
failurePolicy: {{ .Values.webhook.mutating.failurePolicy }}
name: mscheduledbackup.cnpg.io
rules:
- apiGroups:
- postgresql.cnpg.io
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- scheduledbackups
sideEffects: None
{{- end }}
{{- end -}}

View File

@@ -1,106 +0,0 @@
{{- define "cnpg.webhooks.validating" -}}
{{- if .Values.webhook.validating.create }}
{{- $cnpgLabels := .Values.webhook.validating.labels -}}
{{- $cnpgAnnotations := .Values.webhook.validating.annotations -}}
{{- $labels := (mustMerge ($cnpgLabels | default dict) (include "tc.v1.common.lib.metadata.allLabels" $ | fromYaml)) }}
{{- $annotations := (mustMerge ($cnpgAnnotations | default dict) (include "tc.v1.common.lib.metadata.allAnnotations" $ | fromYaml)) }}
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
name: cnpg-validating-webhook-configuration
{{- with (include "tc.v1.common.lib.metadata.render" (dict "rootCtx" $ "labels" $labels) | trim) }}
labels:
{{- . | nindent 4 }}
{{- end }}
{{- with (include "tc.v1.common.lib.metadata.render" (dict "rootCtx" $ "annotations" $annotations) | trim) }}
annotations:
{{- . | nindent 4 }}
{{- end }}
webhooks:
- admissionReviewVersions:
- v1
clientConfig:
service:
name: cnpg-webhook-service
namespace: {{ .Release.Namespace }}
path: /validate-postgresql-cnpg-io-v1-backup
port: 9443
failurePolicy: {{ .Values.webhook.validating.failurePolicy }}
name: vbackup.cnpg.io
rules:
- apiGroups:
- postgresql.cnpg.io
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- backups
sideEffects: None
- admissionReviewVersions:
- v1
clientConfig:
service:
name: cnpg-webhook-service
namespace: {{ .Release.Namespace }}
path: /validate-postgresql-cnpg-io-v1-cluster
port: 443
failurePolicy: {{ .Values.webhook.validating.failurePolicy }}
name: vcluster.cnpg.io
rules:
- apiGroups:
- postgresql.cnpg.io
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- clusters
sideEffects: None
- admissionReviewVersions:
- v1
clientConfig:
service:
name: cnpg-webhook-service
namespace: {{ .Release.Namespace }}
path: /validate-postgresql-cnpg-io-v1-scheduledbackup
port: 443
failurePolicy: {{ .Values.webhook.validating.failurePolicy }}
name: vscheduledbackup.cnpg.io
rules:
- apiGroups:
- postgresql.cnpg.io
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- scheduledbackups
sideEffects: None
- admissionReviewVersions:
- v1
clientConfig:
service:
name: cnpg-webhook-service
namespace: {{ .Release.Namespace }}
path: /validate-postgresql-cnpg-io-v1-pooler
port: 443
failurePolicy: {{ .Values.webhook.validating.failurePolicy }}
name: vpooler.cnpg.io
rules:
- apiGroups:
- postgresql.cnpg.io
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- poolers
sideEffects: None
{{- end }}
{{- end -}}

View File

@@ -1,8 +1,5 @@
{{/* Make sure all variables are set properly */}}
{{- include "tc.v1.common.loader.init" . }}
{{- include "cnpg.webhooks.validating" . -}}
{{- include "cnpg.webhooks.mutating" . -}}
{{/* Render the templates */}}
{{ include "tc.v1.common.loader.apply" . }}

File diff suppressed because it is too large

View File

@@ -1,835 +1,25 @@
image:
repository: tccr.io/truecharts/cloudnative-pg
tag: "v1.21.1@sha256:163bc6e7f03c15fb0c68a14ff30c8f9f6e2a990d7c1034df0e2b473c5116cab5"
pullPolicy:
workload:
main:
podSpec:
containers:
main:
args:
- controller
- --leader-elect
- --config-map-name={{ include "tc.v1.common.lib.chart.names.fullname" $ }}-config
- --secret-name={{ include "tc.v1.common.lib.chart.names.fullname" $ }}-config
- --webhook-port=9443
command:
- /manager
probes:
liveness:
port: webhook
type: https
path: /readyz
readiness:
port: webhook
type: https
path: /readyz
startup:
port: webhook
type: tcp
env:
OPERATOR_IMAGE_NAME: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
OPERATOR_NAMESPACE:
fieldRef:
fieldPath: metadata.namespace
MONITORING_QUERIES_CONFIGMAP: '{{ include "tc.v1.common.lib.chart.names.fullname" $ }}-monitoring'
podOptions:
automountServiceAccountToken: true
repository: tccr.io/truecharts/alpine
pullPolicy: IfNotPresent
tag: latest@sha256:17cd77e25d3fa829d168caec4db7bb5b52ceeb935d8ca0d1180de6f615553dc4
service:
main:
enabled: false
ports:
main:
protocol: http
port: 8080
cnpg-webhook-service:
enabled: true
expandObjectName: false
ports:
webhook:
enabled: true
protocol: https
port: 443
targetPort: 9443
enabled: false
operator:
register: true
persistence:
scratch-data:
enabled: true
type: emptyDir
mountPath: /controller
webhook-certificates:
enabled: true
type: secret
objectName: cnpg-webhook-cert
expandObjectName: false
optional: true
defaultMode: "0420"
readOnly: true
targetSelector:
main:
main:
mountPath: "/run/secrets/cnpg.io/webhook"
workload:
main:
enabled: false
portal:
open:
enabled: false
metrics:
main:
enabled: false
type: "podmonitor"
endpoints:
- port: main
interval: 5s
scrapeTimeout: 5s
path: /
honorLabels: false
rbac:
main:
enabled: true
primary: true
clusterWide: true
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- configmaps/status
verbs:
- get
- patch
- update
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- persistentvolumeclaims
verbs:
- create
- delete
- get
- list
- patch
- watch
- apiGroups:
- ""
resources:
- pods
verbs:
- create
- delete
- get
- list
- patch
- watch
- apiGroups:
- ""
resources:
- pods/exec
verbs:
- create
- delete
- get
- list
- patch
- watch
- apiGroups:
- ""
resources:
- pods/status
verbs:
- get
- apiGroups:
- ""
resources:
- secrets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- secrets/status
verbs:
- get
- patch
- update
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- create
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- admissionregistration.k8s.io
resources:
- mutatingwebhookconfigurations
verbs:
- get
- list
- patch
- update
- apiGroups:
- admissionregistration.k8s.io
resources:
- validatingwebhookconfigurations
verbs:
- get
- list
- patch
- update
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- get
- list
- update
- apiGroups:
- apps
resources:
- deployments
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- batch
resources:
- jobs
verbs:
- create
- delete
- get
- list
- patch
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- get
- update
- apiGroups:
- monitoring.coreos.com
resources:
- podmonitors
verbs:
- create
- delete
- get
- list
- patch
- watch
- apiGroups:
- policy
resources:
- poddisruptionbudgets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- postgresql.cnpg.io
resources:
- backups
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- postgresql.cnpg.io
resources:
- backups/status
verbs:
- get
- patch
- update
- apiGroups:
- postgresql.cnpg.io
resources:
- clusters
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- postgresql.cnpg.io
resources:
- clusters/finalizers
verbs:
- update
- apiGroups:
- postgresql.cnpg.io
resources:
- clusters/status
verbs:
- get
- patch
- update
- watch
- apiGroups:
- postgresql.cnpg.io
resources:
- poolers
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- postgresql.cnpg.io
resources:
- poolers/finalizers
verbs:
- update
- apiGroups:
- postgresql.cnpg.io
resources:
- poolers/status
verbs:
- get
- patch
- update
- watch
- apiGroups:
- postgresql.cnpg.io
resources:
- scheduledbackups
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- postgresql.cnpg.io
resources:
- scheduledbackups/status
verbs:
- get
- patch
- update
- apiGroups:
- rbac.authorization.k8s.io
resources:
- rolebindings
verbs:
- create
- get
- list
- patch
- update
- watch
- apiGroups:
- rbac.authorization.k8s.io
resources:
- roles
verbs:
- create
- get
- list
- patch
- update
- watch
- apiGroups:
- snapshot.storage.k8s.io
resources:
- volumesnapshots
verbs:
- create
- get
- list
- patch
- watch
serviceAccount:
main:
enabled: true
primary: true
webhook:
mutating:
create: true
failurePolicy: Fail
validating:
create: true
failurePolicy: Fail
operator:
register: true
manifestManager:
enabled: false
configmap:
config:
enabled: true
data:
CREATE_ANY_SERVICE: "true"
monitoring:
enabled: true
data:
queries: |
backends:
query: |
SELECT sa.datname
, sa.usename
, sa.application_name
, states.state
, COALESCE(sa.count, 0) AS total
, COALESCE(sa.max_tx_secs, 0) AS max_tx_duration_seconds
FROM ( VALUES ('active')
, ('idle')
, ('idle in transaction')
, ('idle in transaction (aborted)')
, ('fastpath function call')
, ('disabled')
) AS states(state)
LEFT JOIN (
SELECT datname
, state
, usename
, COALESCE(application_name, '') AS application_name
, COUNT(*)
, COALESCE(EXTRACT (EPOCH FROM (max(now() - xact_start))), 0) AS max_tx_secs
FROM pg_catalog.pg_stat_activity
GROUP BY datname, state, usename, application_name
) sa ON states.state = sa.state
WHERE sa.usename IS NOT NULL
metrics:
- datname:
usage: "LABEL"
description: "Name of the database"
- usename:
usage: "LABEL"
description: "Name of the user"
- application_name:
usage: "LABEL"
description: "Name of the application"
- state:
usage: "LABEL"
description: "State of the backend"
- total:
usage: "GAUGE"
description: "Number of backends"
- max_tx_duration_seconds:
usage: "GAUGE"
description: "Maximum duration of a transaction in seconds"
backends_waiting:
query: |
SELECT count(*) AS total
FROM pg_catalog.pg_locks blocked_locks
JOIN pg_catalog.pg_locks blocking_locks
ON blocking_locks.locktype = blocked_locks.locktype
AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database
AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation
AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page
AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple
AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid
AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid
AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid
AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid
AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid
AND blocking_locks.pid != blocked_locks.pid
JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid
WHERE NOT blocked_locks.granted
metrics:
- total:
usage: "GAUGE"
description: "Total number of backends that are currently waiting on other queries"
pg_database:
query: |
SELECT datname
, pg_catalog.pg_database_size(datname) AS size_bytes
, pg_catalog.age(datfrozenxid) AS xid_age
, pg_catalog.mxid_age(datminmxid) AS mxid_age
FROM pg_catalog.pg_database
metrics:
- datname:
usage: "LABEL"
description: "Name of the database"
- size_bytes:
usage: "GAUGE"
description: "Disk space used by the database"
- xid_age:
usage: "GAUGE"
description: "Number of transactions from the frozen XID to the current one"
- mxid_age:
usage: "GAUGE"
description: "Number of multiple transactions (Multixact) from the frozen XID to the current one"
pg_postmaster:
query: |
SELECT EXTRACT(EPOCH FROM pg_postmaster_start_time) AS start_time
FROM pg_catalog.pg_postmaster_start_time()
metrics:
- start_time:
usage: "GAUGE"
description: "Time at which postgres started (based on epoch)"
pg_replication:
query: "SELECT CASE WHEN (
NOT pg_catalog.pg_is_in_recovery()
OR pg_catalog.pg_last_wal_receive_lsn() = pg_catalog.pg_last_wal_replay_lsn())
THEN 0
ELSE GREATEST (0,
EXTRACT(EPOCH FROM (now() - pg_catalog.pg_last_xact_replay_timestamp())))
END AS lag,
pg_catalog.pg_is_in_recovery() AS in_recovery,
EXISTS (TABLE pg_stat_wal_receiver) AS is_wal_receiver_up,
(SELECT count(*) FROM pg_catalog.pg_stat_replication) AS streaming_replicas"
metrics:
- lag:
usage: "GAUGE"
description: "Replication lag behind primary in seconds"
- in_recovery:
usage: "GAUGE"
description: "Whether the instance is in recovery"
- is_wal_receiver_up:
usage: "GAUGE"
description: "Whether the instance wal_receiver is up"
- streaming_replicas:
usage: "GAUGE"
description: "Number of streaming replicas connected to the instance"
pg_replication_slots:
query: |
SELECT slot_name,
slot_type,
database,
active,
(CASE pg_catalog.pg_is_in_recovery()
WHEN TRUE THEN pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_last_wal_receive_lsn(), restart_lsn)
ELSE pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), restart_lsn)
END) as pg_wal_lsn_diff
FROM pg_catalog.pg_replication_slots
WHERE NOT temporary
metrics:
- slot_name:
usage: "LABEL"
description: "Name of the replication slot"
- slot_type:
usage: "LABEL"
description: "Type of the replication slot"
- database:
usage: "LABEL"
description: "Name of the database"
- active:
usage: "GAUGE"
description: "Flag indicating whether the slot is active"
- pg_wal_lsn_diff:
usage: "GAUGE"
description: "Replication lag in bytes"
pg_stat_archiver:
query: |
SELECT archived_count
, failed_count
, COALESCE(EXTRACT(EPOCH FROM (now() - last_archived_time)), -1) AS seconds_since_last_archival
, COALESCE(EXTRACT(EPOCH FROM (now() - last_failed_time)), -1) AS seconds_since_last_failure
, COALESCE(EXTRACT(EPOCH FROM last_archived_time), -1) AS last_archived_time
, COALESCE(EXTRACT(EPOCH FROM last_failed_time), -1) AS last_failed_time
, COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_archived_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_archived_wal_start_lsn
, COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_failed_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_failed_wal_start_lsn
, EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time
FROM pg_catalog.pg_stat_archiver
metrics:
- archived_count:
usage: "COUNTER"
description: "Number of WAL files that have been successfully archived"
- failed_count:
usage: "COUNTER"
description: "Number of failed attempts for archiving WAL files"
- seconds_since_last_archival:
usage: "GAUGE"
description: "Seconds since the last successful archival operation"
- seconds_since_last_failure:
usage: "GAUGE"
description: "Seconds since the last failed archival operation"
- last_archived_time:
usage: "GAUGE"
description: "Epoch of the last time WAL archiving succeeded"
- last_failed_time:
usage: "GAUGE"
description: "Epoch of the last time WAL archiving failed"
- last_archived_wal_start_lsn:
usage: "GAUGE"
description: "Archived WAL start LSN"
- last_failed_wal_start_lsn:
usage: "GAUGE"
description: "Last failed WAL LSN"
- stats_reset_time:
usage: "GAUGE"
description: "Time at which these statistics were last reset"
pg_stat_bgwriter:
query: |
SELECT checkpoints_timed
, checkpoints_req
, checkpoint_write_time
, checkpoint_sync_time
, buffers_checkpoint
, buffers_clean
, maxwritten_clean
, buffers_backend
, buffers_backend_fsync
, buffers_alloc
FROM pg_catalog.pg_stat_bgwriter
metrics:
- checkpoints_timed:
usage: "COUNTER"
description: "Number of scheduled checkpoints that have been performed"
- checkpoints_req:
usage: "COUNTER"
description: "Number of requested checkpoints that have been performed"
- checkpoint_write_time:
usage: "COUNTER"
description: "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds"
- checkpoint_sync_time:
usage: "COUNTER"
description: "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds"
- buffers_checkpoint:
usage: "COUNTER"
description: "Number of buffers written during checkpoints"
- buffers_clean:
usage: "COUNTER"
description: "Number of buffers written by the background writer"
- maxwritten_clean:
usage: "COUNTER"
description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers"
- buffers_backend:
usage: "COUNTER"
description: "Number of buffers written directly by a backend"
- buffers_backend_fsync:
usage: "COUNTER"
description: "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)"
- buffers_alloc:
usage: "COUNTER"
description: "Number of buffers allocated"
pg_stat_database:
query: |
SELECT datname
, xact_commit
, xact_rollback
, blks_read
, blks_hit
, tup_returned
, tup_fetched
, tup_inserted
, tup_updated
, tup_deleted
, conflicts
, temp_files
, temp_bytes
, deadlocks
, blk_read_time
, blk_write_time
FROM pg_catalog.pg_stat_database
metrics:
- datname:
usage: "LABEL"
description: "Name of this database"
- xact_commit:
usage: "COUNTER"
description: "Number of transactions in this database that have been committed"
- xact_rollback:
usage: "COUNTER"
description: "Number of transactions in this database that have been rolled back"
- blks_read:
usage: "COUNTER"
description: "Number of disk blocks read in this database"
- blks_hit:
usage: "COUNTER"
description: "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)"
- tup_returned:
usage: "COUNTER"
description: "Number of rows returned by queries in this database"
- tup_fetched:
usage: "COUNTER"
description: "Number of rows fetched by queries in this database"
- tup_inserted:
usage: "COUNTER"
description: "Number of rows inserted by queries in this database"
- tup_updated:
usage: "COUNTER"
description: "Number of rows updated by queries in this database"
- tup_deleted:
usage: "COUNTER"
description: "Number of rows deleted by queries in this database"
- conflicts:
usage: "COUNTER"
description: "Number of queries canceled due to conflicts with recovery in this database"
- temp_files:
usage: "COUNTER"
description: "Number of temporary files created by queries in this database"
- temp_bytes:
usage: "COUNTER"
description: "Total amount of data written to temporary files by queries in this database"
- deadlocks:
usage: "COUNTER"
description: "Number of deadlocks detected in this database"
- blk_read_time:
usage: "COUNTER"
description: "Time spent reading data file blocks by backends in this database, in milliseconds"
- blk_write_time:
usage: "COUNTER"
description: "Time spent writing data file blocks by backends in this database, in milliseconds"
pg_stat_replication:
primary: true
query: |
SELECT usename
, COALESCE(application_name, '') AS application_name
, COALESCE(client_addr::text, '') AS client_addr
, COALESCE(client_port::text, '') AS client_port
, EXTRACT(EPOCH FROM backend_start) AS backend_start
, COALESCE(pg_catalog.age(backend_xmin), 0) AS backend_xmin_age
, pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), sent_lsn) AS sent_diff_bytes
, pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), write_lsn) AS write_diff_bytes
, pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), flush_lsn) AS flush_diff_bytes
, COALESCE(pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), replay_lsn),0) AS replay_diff_bytes
, COALESCE((EXTRACT(EPOCH FROM write_lag)),0)::float AS write_lag_seconds
, COALESCE((EXTRACT(EPOCH FROM flush_lag)),0)::float AS flush_lag_seconds
, COALESCE((EXTRACT(EPOCH FROM replay_lag)),0)::float AS replay_lag_seconds
FROM pg_catalog.pg_stat_replication
metrics:
- usename:
usage: "LABEL"
description: "Name of the replication user"
- application_name:
usage: "LABEL"
description: "Name of the application"
- client_addr:
usage: "LABEL"
description: "Client IP address"
- client_port:
usage: "LABEL"
description: "Client TCP port"
- backend_start:
usage: "COUNTER"
description: "Time when this process was started"
- backend_xmin_age:
usage: "COUNTER"
description: "The age of this standby's xmin horizon"
- sent_diff_bytes:
usage: "GAUGE"
description: "Difference in bytes from the last write-ahead log location sent on this connection"
- write_diff_bytes:
usage: "GAUGE"
description: "Difference in bytes from the last write-ahead log location written to disk by this standby server"
- flush_diff_bytes:
usage: "GAUGE"
description: "Difference in bytes from the last write-ahead log location flushed to disk by this standby server"
- replay_diff_bytes:
usage: "GAUGE"
description: "Difference in bytes from the last write-ahead log location replayed into the database on this standby server"
- write_lag_seconds:
usage: "GAUGE"
description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it"
- flush_lag_seconds:
usage: "GAUGE"
description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it"
- replay_lag_seconds:
usage: "GAUGE"
description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it"
pg_settings:
query: |
SELECT name,
CASE setting WHEN 'on' THEN '1' WHEN 'off' THEN '0' ELSE setting END AS setting
FROM pg_catalog.pg_settings
WHERE vartype IN ('integer', 'real', 'bool')
ORDER BY 1
metrics:
- name:
usage: "LABEL"
description: "Name of the setting"
- setting:
usage: "GAUGE"
description: "Setting value"

View File

@@ -11,6 +11,10 @@ dependencies:
- name: common
repository: https://library-charts.truecharts.org
version: 14.3.5
- name: metallb
repository: https://metallb.github.io/metallb
version: 0.13.12
alias: metallb
kubeVersion: ">=1.16.0-0"
maintainers:
- email: info@truecharts.org
@@ -22,7 +26,7 @@ sources:
- https://github.com/metallb/metallb
- https://metallb.universe.tf
type: application
version: 10.0.11
version: 11.0.0
annotations:
truecharts.org/category: operators
truecharts.org/SCALE-support: "true"

View File

@@ -1,162 +0,0 @@
{{- define "metallb.webhooks" -}}
{{- $labels := (include "tc.v1.common.lib.metadata.allLabels" $ | fromYaml) }}
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
name: metallb-webhook-configuration
labels:
{{- with (include "tc.v1.common.lib.metadata.render" (dict "rootCtx" $ "labels" $labels) | trim) }}
{{- . | nindent 4 }}
{{- end }}
webhooks:
- admissionReviewVersions:
- v1
clientConfig:
service:
name: '{{ include "tc.v1.common.lib.chart.names.fullname" $ }}'
namespace: {{ .Release.Namespace }}
path: /validate-metallb-io-v1beta1-addresspool
failurePolicy: {{ .Values.validationFailurePolicy }}
name: addresspoolvalidationwebhook.metallb.io
rules:
- apiGroups:
- metallb.io
apiVersions:
- v1beta1
operations:
- CREATE
- UPDATE
resources:
- addresspools
sideEffects: None
- admissionReviewVersions:
- v1
clientConfig:
service:
name: '{{ include "tc.v1.common.lib.chart.names.fullname" $ }}'
namespace: {{ .Release.Namespace }}
path: /validate-metallb-io-v1beta2-bgppeer
failurePolicy: {{ .Values.validationFailurePolicy }}
name: bgppeervalidationwebhook.metallb.io
rules:
- apiGroups:
- metallb.io
apiVersions:
- v1beta2
operations:
- CREATE
- UPDATE
resources:
- bgppeers
sideEffects: None
- admissionReviewVersions:
- v1
clientConfig:
service:
name: '{{ include "tc.v1.common.lib.chart.names.fullname" $ }}'
namespace: {{ .Release.Namespace }}
path: /validate-metallb-io-v1beta1-ipaddresspool
failurePolicy: {{ .Values.validationFailurePolicy }}
name: ipaddresspoolvalidationwebhook.metallb.io
rules:
- apiGroups:
- metallb.io
apiVersions:
- v1beta1
operations:
- CREATE
- UPDATE
resources:
- ipaddresspools
sideEffects: None
- admissionReviewVersions:
- v1
clientConfig:
service:
name: '{{ include "tc.v1.common.lib.chart.names.fullname" $ }}'
namespace: {{ .Release.Namespace }}
path: /validate-metallb-io-v1beta1-bgpadvertisement
failurePolicy: {{ .Values.validationFailurePolicy }}
name: bgpadvertisementvalidationwebhook.metallb.io
rules:
- apiGroups:
- metallb.io
apiVersions:
- v1beta1
operations:
- CREATE
- UPDATE
resources:
- bgpadvertisements
sideEffects: None
- admissionReviewVersions:
- v1
clientConfig:
service:
name: '{{ include "tc.v1.common.lib.chart.names.fullname" $ }}'
namespace: {{ .Release.Namespace }}
path: /validate-metallb-io-v1beta1-community
failurePolicy: {{ .Values.validationFailurePolicy }}
name: communityvalidationwebhook.metallb.io
rules:
- apiGroups:
- metallb.io
apiVersions:
- v1beta1
operations:
- CREATE
- UPDATE
resources:
- communities
sideEffects: None
- admissionReviewVersions:
- v1
clientConfig:
service:
name: '{{ include "tc.v1.common.lib.chart.names.fullname" $ }}'
namespace: {{ .Release.Namespace }}
path: /validate-metallb-io-v1beta1-bfdprofile
failurePolicy: {{ .Values.validationFailurePolicy }}
name: bfdprofilevalidationwebhook.metallb.io
rules:
- apiGroups:
- metallb.io
apiVersions:
- v1beta1
operations:
- CREATE
- DELETE
resources:
- bfdprofiles
sideEffects: None
- admissionReviewVersions:
- v1
clientConfig:
service:
name: '{{ include "tc.v1.common.lib.chart.names.fullname" $ }}'
namespace: {{ .Release.Namespace }}
path: /validate-metallb-io-v1beta1-l2advertisement
failurePolicy: {{ .Values.validationFailurePolicy }}
name: l2advertisementvalidationwebhook.metallb.io
rules:
- apiGroups:
- metallb.io
apiVersions:
- v1beta1
operations:
- CREATE
- UPDATE
resources:
- l2advertisements
sideEffects: None
---
apiVersion: v1
kind: Secret
metadata:
name: webhook-server-cert
labels:
{{- with (include "tc.v1.common.lib.metadata.render" (dict "rootCtx" $ "labels" $labels) | trim) }}
{{- . | nindent 4 }}
{{- end }}
{{- end -}}

View File

@@ -1,7 +1,5 @@
{{/* Make sure all variables are set properly */}}
{{- include "tc.v1.common.loader.init" . }}
{{- include "metallb.webhooks" . -}}
{{/* Render the templates */}}
{{ include "tc.v1.common.loader.apply" . }}

File diff suppressed because it is too large

View File

@@ -1,347 +1,25 @@
image:
repository: tccr.io/truecharts/metallb-controller
tag: v0.13.12@sha256:0648f93b8c42b6531b05b1f2c197fd16d367b7a0c2b8b474b15fcb55baaf6867
pullPolicy:
speakerImage:
repository: tccr.io/truecharts/metallb-speaker
tag: v0.13.12@sha256:92d801d823bfe74e2e129573c9f88f9157616bd4128fddec585d5b6c0dc99bd8
pullPolicy:
workload:
main:
strategy: RollingUpdate
labels:
app.kubernetes.io/component: controller
podSpec:
labels:
app.kubernetes.io/component: controller
containers:
main:
args:
- --port=7472
- --log-level=all
- --cert-service-name={{ include "tc.v1.common.lib.chart.names.fullname" $ }}
- --webhook-mode=enabled
probes:
liveness:
port: controllermon
path: /metrics
readiness:
port: controllermon
path: /metrics
startup:
port: controllermon
type: tcp
env:
METALLB_ML_SECRET_NAME: "memberlist"
METALLB_DEPLOYMENT: '{{ include "tc.v1.common.lib.chart.names.fullname" $ }}'
METALLB_NAMESPACE: "{{$.Release.Namespace}}"
speaker:
enabled: true
type: DaemonSet
strategy: RollingUpdate
labels:
app.kubernetes.io/component: controller
podSpec:
labels:
app.kubernetes.io/component: controller
shareProcessNamespace: true
hostNetwork: true
containers:
speaker:
enabled: true
primary: true
imageSelector: speakerImage
args:
- --port=7473
- --log-level=all
probes:
liveness:
port: speakermon
path: /metrics
readiness:
port: speakermon
path: /metrics
startup:
port: speakermon
type: tcp
env:
METALLB_NODE_NAME:
fieldRef:
fieldPath: spec.nodeName
METALLB_HOST:
fieldRef:
fieldPath: status.hostIP
METALLB_ML_BIND_ADDR:
fieldRef:
fieldPath: status.podIP
METALLB_ML_LABELS: "release={{ $.Release.Name }},app.kubernetes.io/component=speaker"
METALLB_ML_BIND_PORT: "{{ $.Values.service.memberlist.ports.memberlisttcp.port }}"
METALLB_ML_SECRET_KEY_PATH: "/etc/ml_secret_key"
METALLB_NAMESPACE: "{{$.Release.Namespace}}"
securityContext:
runAsUser: 0
capabilities:
add:
- NET_RAW
podOptions:
automountServiceAccountToken: true
repository: tccr.io/truecharts/alpine
pullPolicy: IfNotPresent
tag: latest@sha256:17cd77e25d3fa829d168caec4db7bb5b52ceeb935d8ca0d1180de6f615553dc4
service:
main:
enabled: false
ports:
main:
port: 443
targetPort: 9443
memberlist:
enabled: true
targetSelector: speaker
ports:
memberlisttcp:
enabled: true
protocol: tcp
port: 7946
memberlistudp:
enabled: true
protocol: udp
port: 7946
speakermon:
enabled: true
targetSelector: speaker
clusterIP: None
ports:
speakermon:
enabled: true
port: 7473
controllermon:
enabled: true
clusterIP: None
ports:
controllermon:
enabled: true
port: 7472
enabled: false
operator:
register: true
workload:
main:
enabled: false
configmap:
metallb-excludel2:
enabled: true
data:
excludel2.yaml: |
announcedInterfacesToExclude:
- docker.*
- cbr.*
- dummy.*
- virbr.*
- lxcbr.*
- veth.*
- lo
- ^cali.*
- ^tunl.*
- flannel.*
- kube-ipvs.*
- cni.*
- ^nodelocaldns.*
persistence:
webhook-server-cert:
enabled: true
type: secret
objectName: webhook-server-cert
expandObjectName: false
defaultMode: "0420"
readOnly: true
targetSelector:
main:
main:
mountPath: "/tmp/k8s-webhook-server/serving-certs"
metallb-excludel2:
enabled: "{{ if $.Values.speaker.excludeInterfaces.enabled }}true{{ else }}false{{ end }}"
type: configmap
objectName: metallb-excludel2
defaultMode: "0256"
readOnly: true
targetSelector:
speaker:
speaker:
mountPath: "/etc/metallb"
memberlist:
enabled: true
type: secret
objectName: memberlist
expandObjectName: false
defaultMode: "0420"
targetSelector:
speaker:
speaker:
mountPath: "/etc/ml_secret_key"
portal:
open:
enabled: false
# -- Whether Role Based Access Control objects like roles and rolebindings should be created
rbac:
main:
enabled: true
primary: true
clusterWide: true
allServiceAccounts: true
rules:
- apiGroups: [""]
resources: ["services", "endpoints", "nodes", "namespaces"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["list"]
- apiGroups: [""]
resources: ["services/status"]
verbs: ["update"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "patch"]
- apiGroups: ["admissionregistration.k8s.io"]
resources:
["validatingwebhookconfigurations", "mutatingwebhookconfigurations"]
resourceNames: ["metallb-webhook-configuration"]
verbs: ["create", "delete", "get", "list", "patch", "update", "watch"]
- apiGroups: ["admissionregistration.k8s.io"]
resources:
["validatingwebhookconfigurations", "mutatingwebhookconfigurations"]
verbs: ["list", "watch"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
resourceNames:
[
"addresspools.metallb.io",
"bfdprofiles.metallb.io",
"bgpadvertisements.metallb.io",
"bgppeers.metallb.io",
"ipaddresspools.metallb.io",
"l2advertisements.metallb.io",
"communities.metallb.io",
]
verbs: ["create", "delete", "get", "list", "patch", "update", "watch"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["list", "watch"]
- apiGroups: ["discovery.k8s.io"]
resources: ["endpointslices"]
verbs: ["get", "list", "watch"]
controller:
enabled: true
primary: false
clusterWide: false
serviceAccounts:
- main
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["create", "get", "list", "watch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["list"]
- apiGroups: ["apps"]
resources: ["deployments"]
verbs: ["get"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["create", "delete", "get", "list", "patch", "update", "watch"]
- apiGroups: ["metallb.io"]
resources: ["addresspools"]
verbs: ["get", "list", "watch"]
- apiGroups: ["metallb.io"]
resources: ["ipaddresspools"]
verbs: ["get", "list", "watch"]
- apiGroups: ["metallb.io"]
resources: ["bgppeers"]
verbs: ["get", "list"]
- apiGroups: ["metallb.io"]
resources: ["bgpadvertisements"]
verbs: ["get", "list"]
- apiGroups: ["metallb.io"]
resources: ["l2advertisements"]
verbs: ["get", "list"]
- apiGroups: ["metallb.io"]
resources: ["communities"]
verbs: ["get", "list", "watch"]
- apiGroups: ["metallb.io"]
resources: ["bfdprofiles"]
verbs: ["get", "list", "watch"]
pod-lister:
enabled: true
primary: false
clusterWide: false
serviceAccounts:
- speaker
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["list"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list", "watch"]
- apiGroups: ["metallb.io"]
resources: ["addresspools"]
verbs: ["get", "list", "watch"]
- apiGroups: ["metallb.io"]
resources: ["bfdprofiles"]
verbs: ["get", "list", "watch"]
- apiGroups: ["metallb.io"]
resources: ["bgppeers"]
verbs: ["get", "list", "watch"]
- apiGroups: ["metallb.io"]
resources: ["l2advertisements"]
verbs: ["get", "list", "watch"]
- apiGroups: ["metallb.io"]
resources: ["bgpadvertisements"]
verbs: ["get", "list", "watch"]
- apiGroups: ["metallb.io"]
resources: ["ipaddresspools"]
verbs: ["get", "list", "watch"]
- apiGroups: ["metallb.io"]
resources: ["communities"]
verbs: ["get", "list", "watch"]
# -- The service account the pods will use to interact with the Kubernetes API
serviceAccount:
main:
enabled: true
primary: true
targetSelector:
- main
speaker:
enabled: true
primary: false
targetSelector:
- speaker
# controller contains configuration specific to the MetalLB cluster
# controller.
controller:
enabled: true
# -- Controller log level. Must be one of: `all`, `debug`, `info`, `warn`, `error` or `none`
logLevel: info
# command: /controller
# webhookMode: enabled
# speaker contains configuration specific to the MetalLB speaker
# daemonset.
speaker:
enabled: true
# command: /speaker
# -- Speaker log level. Must be one of: `all`, `debug`, `info`, `warn`, `error` or `none`
logLevel: info
tolerateMaster: true
excludeInterfaces:
enabled: true
validationFailurePolicy: Fail
operator:
register: true
manifestManager:
enabled: false