chore(prometheus): move all exporters to the KPS dependency and drop some self-built dependencies

Kjeld Schouten 2024-02-27 15:36:57 +01:00
parent 00389e8fb0
commit 4415194797
28 changed files with 128 additions and 1356 deletions

@ -1,30 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
# OWNERS file for Kubernetes
OWNERS
# helm-docs templates
*.gotmpl
# docs folder
/docs
# icon
icon.png

@ -1,37 +0,0 @@
annotations:
max_scale_version: 24.04.0
min_scale_version: 23.10.0
truecharts.org/SCALE-support: "true"
truecharts.org/category: metrics
truecharts.org/max_helm_version: "3.14"
truecharts.org/min_helm_version: "3.12"
truecharts.org/train: dependency
apiVersion: v2
appVersion: 2.10.1
dependencies:
- name: common
version: 18.0.3
repository: oci://tccr.io/truecharts
condition: ""
alias: ""
tags: []
import-values: []
deprecated: false
description: kube-state-metrics is a simple service that listens to the Kubernetes API server and generates metrics about the state of the objects.
home: https://truecharts.org/charts/dependency/kube-state-metrics
icon: https://truecharts.org/img/hotlink-ok/chart-icons/kube-state-metrics.png
keywords:
- prometheus
- kube-state-metrics
- monitoring
kubeVersion: ">=1.24.0-0"
maintainers:
- name: TrueCharts
email: info@truecharts.org
url: https://truecharts.org
name: kube-state-metrics
sources:
- https://github.com/truecharts/charts/tree/master/charts/dependency/kube-state-metrics
- https://hub.docker.com/r/bitnami/kube-state-metrics
type: application
version: 7.1.4

@ -1,28 +0,0 @@
---
title: README
---
## General Info
TrueCharts can be installed both as _normal_ Helm Charts and as Apps on TrueNAS SCALE.
However, only installations using the TrueNAS SCALE Apps system are supported.
For more information about this App, please check the docs on the TrueCharts [website](https://truecharts.org/charts/dependency/kube-state-metrics).
**This chart is not maintained by the upstream project, and any issues with the chart should be raised [here](https://github.com/truecharts/charts/issues/new/choose).**
## Support
- Please check our [quick-start guides for TrueNAS SCALE](https://truecharts.org/manual/SCALE/guides/scale-intro).
- See the [Website](https://truecharts.org)
- Check our [Discord](https://discord.gg/tVsPTHWTtr)
- Open an [issue](https://github.com/truecharts/charts/issues/new/choose)
---
## Sponsor TrueCharts
TrueCharts can only exist due to the incredible effort of our staff.
Please consider making a [donation](https://truecharts.org/sponsor) or contributing back to the project in any way you can!
_All Rights Reserved - The TrueCharts Project_

Binary file not shown (before: 8.5 KiB).

@ -1,102 +0,0 @@
# Include{groups}
questions:
# Include{global}
# Include{workload}
# Include{workloadDeployment}
# Include{replicas1}
# Include{podSpec}
# Include{containerMain}
# Include{containerBasic}
# Include{containerAdvanced}
# Include{containerConfig}
# Include{podOptions}
# Include{serviceRoot}
- variable: main
label: "Main Service"
description: "The Primary service on which the healthcheck runs, often the webUI"
schema:
additional_attrs: true
type: dict
attrs:
# Include{serviceSelectorClusterIP}
# Include{serviceSelectorExtras}
- variable: main
label: "Main Service Port Configuration"
schema:
additional_attrs: true
type: dict
attrs:
- variable: port
label: "Port"
description: "This port exposes the container port on the service"
schema:
type: int
default: 8080
required: true
- variable: selfmonitor
label: "selfmonitor Service"
description: "The Primary service on which the healthcheck runs, often the webUI"
schema:
additional_attrs: true
type: dict
attrs:
# Include{serviceSelectorClusterIP}
# Include{serviceSelectorExtras}
- variable: selfmonitor
label: "selfmonitor Service Port Configuration"
schema:
additional_attrs: true
type: dict
attrs:
- variable: port
label: "Port"
description: "This port exposes the container port on the service"
schema:
type: int
default: 8081
required: true
# Include{serviceExpertRoot}
# Include{serviceExpert}
# Include{serviceList}
# Include{persistenceList}
# Include{ingressRoot}
- variable: main
label: "Main Ingress"
schema:
additional_attrs: true
type: dict
attrs:
# Include{ingressDefault}
# Include{ingressAdvanced}
# Include{ingressList}
# Include{securityContextRoot}
- variable: runAsUser
label: "runAsUser"
description: "The UserID of the user running the application"
schema:
type: int
default: 568
- variable: runAsGroup
label: "runAsGroup"
description: "The groupID of the user running the application"
schema:
type: int
default: 568
# Include{securityContextContainer}
# Include{securityContextAdvanced}
# Include{securityContextPod}
- variable: fsGroup
label: "fsGroup"
description: "The group that should own ALL storage."
schema:
type: int
default: 568
# Include{resources}
# Include{metrics}
# Include{prometheusRule}
# Include{advanced}
# Include{addons}
# Include{codeserver}
# Include{netshoot}
# Include{vpn}
# Include{documentation}

@ -1 +0,0 @@
{{- include "tc.v1.common.lib.chart.notes" $ -}}

@ -1 +0,0 @@
{{ include "tc.v1.common.loader.all" . }}

@ -1,306 +0,0 @@
image:
repository: bitnami/kube-state-metrics
pullPolicy: IfNotPresent
tag: 2.10.1@sha256:4a1ead75b43275fcdaa1651f85fc7f52ea5f17ff9a81d794659efbb33c72eff6
service:
main:
ports:
main:
protocol: http
port: 8080
selfmonitor:
enabled: true
ports:
selfmonitor:
enabled: true
protocol: http
port: 8081
workload:
main:
podSpec:
containers:
main:
args:
- --resources=certificatesigningrequests
- --resources=configmaps
- --resources=cronjobs
- --resources=daemonsets
- --resources=deployments
- --resources=endpoints
- --resources=horizontalpodautoscalers
- --resources=ingresses
- --resources=jobs
- --resources=limitranges
- --resources=mutatingwebhookconfigurations
- --resources=namespaces
- --resources=networkpolicies
- --resources=nodes
- --resources=persistentvolumeclaims
- --resources=persistentvolumes
- --resources=poddisruptionbudgets
- --resources=pods
- --resources=replicasets
- --resources=replicationcontrollers
- --resources=resourcequotas
- --resources=secrets
- --resources=services
- --resources=statefulsets
- --resources=storageclasses
# - --resources=verticalpodautoscalers
- --resources=validatingwebhookconfigurations
- --resources=volumeattachments
probes:
liveness:
path: /healthz
port: main
readiness:
path: /healthz
port: main
startup:
type: tcp
port: main
podOptions:
automountServiceAccountToken: true
serviceAccount:
main:
enabled: true
primary: true
rbac:
main:
enabled: true
primary: true
clusterWide: true
rules:
- apiGroups:
- certificates.k8s.io
resources:
- certificatesigningrequests
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- configmaps
verbs:
- list
- watch
- apiGroups:
- batch
resources:
- cronjobs
verbs:
- list
- watch
- apiGroups:
- extensions
- apps
resources:
- daemonsets
verbs:
- list
- watch
- apiGroups:
- extensions
- apps
resources:
- deployments
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- endpoints
verbs:
- list
- watch
- apiGroups:
- autoscaling
resources:
- horizontalpodautoscalers
verbs:
- list
- watch
- apiGroups:
- extensions
- networking.k8s.io
resources:
- ingresses
verbs:
- list
- watch
- apiGroups:
- batch
resources:
- jobs
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- limitranges
verbs:
- list
- watch
- apiGroups:
- admissionregistration.k8s.io
resources:
- mutatingwebhookconfigurations
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- namespaces
verbs:
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- networkpolicies
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- persistentvolumeclaims
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- persistentvolumes
verbs:
- list
- watch
- apiGroups:
- policy
resources:
- poddisruptionbudgets
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- pods
verbs:
- list
- watch
- apiGroups:
- extensions
- apps
resources:
- replicasets
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- replicationcontrollers
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- resourcequotas
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- secrets
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- list
- watch
- apiGroups:
- apps
resources:
- statefulsets
verbs:
- list
- watch
- apiGroups:
- storage.k8s.io
resources:
- storageclasses
verbs:
- list
- watch
- apiGroups:
- autoscaling.k8s.io
resources:
- verticalpodautoscalers
verbs:
- list
- watch
- apiGroups:
- admissionregistration.k8s.io
resources:
- validatingwebhookconfigurations
verbs:
- list
- watch
- apiGroups:
- storage.k8s.io
resources:
- volumeattachments
verbs:
- list
- watch
- apiGroups: ["discovery.k8s.io"]
resources:
- endpointslices
verbs: ["list", "watch"]
metrics:
main:
# -- Enable and configure a Prometheus serviceMonitor for the chart under this key.
# @default -- See values.yaml
enabled: true
type: "servicemonitor"
endpoints:
- port: main
- port: selfmonitor
# -- Enable and configure Prometheus Rules for the chart under this key.
# @default -- See values.yaml
prometheusRule:
enabled: false
labels: {}
# -- Configure additional rules for the chart under this key.
# @default -- See prometheusrules.yaml
rules: []
# - alert: KubeStateMetricsAbsent
# annotations:
# description: kube-state-metrics has disappeared from Prometheus service discovery.
# summary: kube-state-metrics is down.
# expr: |
# absent(up{job=~".*kube-state-metrics.*"} == 1)
# for: 5m
# labels:
# severity: critical
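The two endpoint entries above refer to the named service ports declared earlier in this file. Under prometheus-operator conventions, the common library would be expected to render a ServiceMonitor roughly like this (a sketch of the output, not the library's exact template):

apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
spec:
  endpoints:
    - port: main         # service port 8080, the metrics endpoint
    - port: selfmonitor  # service port 8081, the exporter's own telemetry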
portal:
open:
enabled: false

@ -1,30 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
# OWNERS file for Kubernetes
OWNERS
# helm-docs templates
*.gotmpl
# docs folder
/docs
# icon
icon.png

@ -1,37 +0,0 @@
annotations:
max_scale_version: 24.04.0
min_scale_version: 23.10.0
truecharts.org/SCALE-support: "true"
truecharts.org/category: metrics
truecharts.org/max_helm_version: "3.14"
truecharts.org/min_helm_version: "3.12"
truecharts.org/train: dependency
apiVersion: v2
appVersion: 1.7.0
dependencies:
- name: common
version: 18.0.3
repository: oci://tccr.io/truecharts
condition: ""
alias: ""
tags: []
import-values: []
deprecated: false
description: Prometheus exporter for hardware and OS metrics exposed by UNIX kernels, with pluggable metric collectors.
home: https://truecharts.org/charts/dependency/node-exporter
icon: https://truecharts.org/img/hotlink-ok/chart-icons/node-exporter.png
keywords:
- prometheus
- node-exporter
- monitoring
kubeVersion: ">=1.24.0-0"
maintainers:
- name: TrueCharts
email: info@truecharts.org
url: https://truecharts.org
name: node-exporter
sources:
- https://github.com/truecharts/charts/tree/master/charts/dependency/node-exporter
- https://hub.docker.com/r/bitnami/node-exporter
type: application
version: 7.1.4

@ -1,28 +0,0 @@
---
title: README
---
## General Info
TrueCharts can be installed both as _normal_ Helm Charts and as Apps on TrueNAS SCALE.
However, only installations using the TrueNAS SCALE Apps system are supported.
For more information about this App, please check the docs on the TrueCharts [website](https://truecharts.org/charts/dependency/node-exporter).
**This chart is not maintained by the upstream project, and any issues with the chart should be raised [here](https://github.com/truecharts/charts/issues/new/choose).**
## Support
- Please check our [quick-start guides for TrueNAS SCALE](https://truecharts.org/manual/SCALE/guides/scale-intro).
- See the [Website](https://truecharts.org)
- Check our [Discord](https://discord.gg/tVsPTHWTtr)
- Open an [issue](https://github.com/truecharts/charts/issues/new/choose)
---
## Sponsor TrueCharts
TrueCharts can only exist due to the incredible effort of our staff.
Please consider making a [donation](https://truecharts.org/sponsor) or contributing back to the project in any way you can!
_All Rights Reserved - The TrueCharts Project_

Binary file not shown (before: 8.5 KiB).

@ -1,80 +0,0 @@
# Include{groups}
questions:
# Include{global}
# Include{workload}
# Include{workloadDeployment}
# Include{replicas1}
# Include{podSpec}
# Include{containerMain}
# Include{containerBasic}
# Include{containerAdvanced}
# Include{containerConfig}
# Include{podOptions}
# Include{serviceRoot}
- variable: main
label: "Main Service"
description: "The Primary service on which the healthcheck runs, often the webUI"
schema:
additional_attrs: true
type: dict
attrs:
# Include{serviceSelectorLoadBalancer}
# Include{serviceSelectorExtras}
- variable: main
label: "Main Service Port Configuration"
schema:
additional_attrs: true
type: dict
attrs:
- variable: port
label: "Port"
description: "This port exposes the container port on the service"
schema:
type: int
default: 9100
required: true
# Include{serviceExpertRoot}
# Include{serviceExpert}
# Include{serviceList}
# Include{persistenceList}
# Include{ingressRoot}
- variable: main
label: "Main Ingress"
schema:
additional_attrs: true
type: dict
attrs:
# Include{ingressDefault}
# Include{ingressAdvanced}
# Include{ingressList}
# Include{securityContextRoot}
- variable: runAsUser
label: "runAsUser"
description: "The UserID of the user running the application"
schema:
type: int
default: 568
- variable: runAsGroup
label: "runAsGroup"
description: "The groupID of the user running the application"
schema:
type: int
default: 568
# Include{securityContextContainer}
# Include{securityContextAdvanced}
# Include{securityContextPod}
- variable: fsGroup
label: "fsGroup"
description: "The group that should own ALL storage."
schema:
type: int
default: 568
# Include{resources}
# Include{metrics}
# Include{prometheusRule}
# Include{advanced}
# Include{addons}
# Include{codeserver}
# Include{netshoot}
# Include{vpn}
# Include{documentation}

@ -1 +0,0 @@
{{- include "tc.v1.common.lib.chart.notes" $ -}}

@ -1 +0,0 @@
{{ include "tc.v1.common.loader.all" . }}

@ -1,86 +0,0 @@
image:
repository: bitnami/node-exporter
pullPolicy: IfNotPresent
tag: 1.7.0@sha256:6d28e7371aeb5785907e4e5461bd07324d2f8a8c45dff25887252281f5c1d13d
service:
main:
ports:
main:
protocol: http
port: 9910
workload:
main:
type: DaemonSet
podSpec:
containers:
main:
args:
- --path.rootfs=/host
- --path.procfs=/hostproc
- --path.sysfs=/hostsys
- --web.listen-address=0.0.0.0:{{ .Values.service.main.ports.main.port }}
- --collector.filesystem.mount-points-exclude="^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/.+|var/db/system/.+|mnt/[a-zA-Z0-9-_\\.]+/ix-applications/.+)($|/)"
- --collector.filesystem.fs-types-exclude="^(autofs|binfmt_misc|cgroup|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|mqueue|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|sysfs|tracefs)$"
- --collector.netdev.device-exclude="^veth.*$"
- --collector.netclass.ignored-devices="^veth.*$"
probes:
liveness:
path: /
port: main
readiness:
path: /
port: main
startup:
type: tcp
port: main
podOptions:
hostNetwork: true
hostPID: true
persistence:
host:
enabled: true
type: hostPath
hostPath: /
mountPath: /host
readOnly: true
proc:
enabled: true
type: hostPath
hostPath: /proc
mountPath: /hostproc
readOnly: true
sys:
enabled: true
type: hostPath
hostPath: /sys
mountPath: /hostsys
readOnly: true
metrics:
main:
# -- Enable and configure a Prometheus serviceMonitor for the chart under this key.
# @default -- See values.yaml
enabled: true
type: "servicemonitor"
endpoints:
- port: main
path: /metrics
# -- Enable and configure Prometheus Rules for the chart under this key.
# @default -- See values.yaml
prometheusRule:
enabled: false
labels: {}
# -- Configure additional rules for the chart under this key.
# @default -- See prometheusrules.yaml
rules: []
# - alert: NodeExporterAbsent
# annotations:
# description: node-exporter has disappeared from Prometheus service discovery.
# summary: node-exporter is down.
# expr: |
# absent(up{job=~".*node-exporter.*"} == 1)
# for: 5m
# labels:
# severity: critical
portal:
open:
enabled: false

@ -16,18 +16,11 @@ dependencies:
alias: ""
tags: []
import-values: []
- name: node-exporter
version: 6.3.0
repository: oci://tccr.io/truecharts
condition: exporters.enabled,exporters.node-exporter.enabled
alias: ""
tags: []
import-values: []
- name: kube-state-metrics
version: 6.3.0
repository: oci://tccr.io/truecharts
condition: exporters.enabled,exporters.kube-state-metrics.enabled
alias: ""
- name: kube-prometheus-stack
version: 56.9.0
repository: oci://ghcr.io/prometheus-community/charts
condition: ""
alias: kps
tags: []
import-values: []
deprecated: false
@ -50,4 +43,4 @@ sources:
- https://quay.io/thanos/thanos
- https://hub.docker.com/r/bitnami/alertmanager
type: application
version: 17.4.1
version: 17.4.2
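Because the new dependency carries the alias kps, Helm hands everything under the top-level kps: key of this chart's values.yaml to the kube-prometheus-stack subchart as its own values; the values.yaml hunk later in this diff relies on exactly that. A minimal sketch of the wiring:

# Chart.yaml (excerpt from the hunk above)
dependencies:
  - name: kube-prometheus-stack
    version: 56.9.0
    repository: oci://ghcr.io/prometheus-community/charts
    alias: kps              # the subchart is addressed as "kps"

# values.yaml
kps:                        # becomes .Values inside the subchart
  prometheusOperator:
    enabled: false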

@ -1,22 +0,0 @@
{{- if and .Values.coreDns.enabled .Values.coreDns.service.enabled }}
apiVersion: v1
kind: Service
metadata:
name: {{ template "kube-prometheus.fullname" . }}-coredns
namespace: {{ .Values.coreDns.namespace }}
labels: {{- include "kube-prometheus.labels" . | nindent 4 }}
app.kubernetes.io/component: {{ template "kube-prometheus.fullname" . }}-coredns
spec:
clusterIP: None
ports:
- name: http-metrics
port: {{ .Values.coreDns.service.port }}
protocol: TCP
targetPort: {{ .Values.coreDns.service.targetPort }}
selector:
{{- if .Values.coreDns.service.selector }}
{{ toYaml .Values.coreDns.service.selector | indent 4 }}
{{- else}}
k8s-app: kube-dns
{{- end}}
{{- end }}

@ -1,29 +0,0 @@
{{- if .Values.coreDns.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "kube-prometheus.fullname" . }}-coredns
namespace: {{ .Release.Namespace }}
labels: {{- include "kube-prometheus.labels" . | nindent 4 }}
app.kubernetes.io/component: {{ template "kube-prometheus.fullname" . }}-coredns
spec:
jobLabel: k8s-app
selector:
matchLabels:
app.kubernetes.io/component: {{ template "kube-prometheus.fullname" . }}-coredns
namespaceSelector:
matchNames:
- {{ .Values.coreDns.namespace }}
endpoints:
- port: http-metrics
{{- if .Values.coreDns.serviceMonitor.interval}}
interval: {{ .Values.coreDns.serviceMonitor.interval }}
{{- end }}
bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
{{- if .Values.coreDns.serviceMonitor.metricRelabelings }}
metricRelabelings: {{- include "tc.v1.common.tplvalues.render" (dict "value" .Values.coreDns.serviceMonitor.metricRelabelings "context" $) | nindent 6 }}
{{- end }}
{{- if .Values.coreDns.serviceMonitor.relabelings }}
relabelings: {{- include "tc.v1.common.tplvalues.render" (dict "value" .Values.coreDns.serviceMonitor.relabelings "context" $) | nindent 6 }}
{{- end }}
{{- end }}

@ -1,35 +0,0 @@
{{- if .Values.kubeApiServer.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "kube-prometheus.fullname" . }}-apiserver
namespace: {{ .Release.Namespace }}
labels: {{- include "kube-prometheus.labels" . | nindent 4 }}
app.kubernetes.io/component: apiserver
spec:
jobLabel: component
selector:
matchLabels:
component: apiserver
provider: kubernetes
namespaceSelector:
matchNames:
- default
endpoints:
- port: https
scheme: https
tlsConfig:
caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
serverName: kubernetes
insecureSkipVerify: true
bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
{{- if .Values.kubeApiServer.serviceMonitor.interval }}
interval: {{ .Values.kubeApiServer.serviceMonitor.interval }}
{{- end }}
{{- if .Values.kubeApiServer.serviceMonitor.metricRelabelings }}
metricRelabelings: {{- include "tc.v1.common.tplvalues.render" ( dict "value" .Values.kubeApiServer.serviceMonitor.metricRelabelings "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.kubeApiServer.serviceMonitor.relabelings }}
relabelings: {{- toYaml .Values.kubeApiServer.serviceMonitor.relabelings | nindent 8 }}
{{- end }}
{{- end }}

@ -1,18 +0,0 @@
{{- if and .Values.kubeControllerManager.enabled .Values.kubeControllerManager.endpoints }}
apiVersion: v1
kind: Endpoints
metadata:
name: {{ template "kube-prometheus.fullname" . }}-kube-controller-manager
namespace: {{ .Values.kubeControllerManager.namespace }}
labels: {{- include "kube-prometheus.labels" . | nindent 4 }}
app.kubernetes.io/component: kube-controller-manager
subsets:
- addresses:
{{- range .Values.kubeControllerManager.endpoints }}
- ip: {{ . }}
{{- end }}
ports:
- name: http-metrics
port: {{ .Values.kubeControllerManager.service.port }}
protocol: TCP
{{- end }}

@ -1,25 +0,0 @@
{{- if and .Values.kubeControllerManager.enabled .Values.kubeControllerManager.service.enabled }}
apiVersion: v1
kind: Service
metadata:
name: {{ template "kube-prometheus.fullname" . }}-kube-controller-manager
namespace: {{ .Values.kubeControllerManager.namespace }}
labels: {{- include "kube-prometheus.labels" . | nindent 4 }}
app.kubernetes.io/component: {{ template "kube-prometheus.fullname" . }}-kube-controller-manager
spec:
clusterIP: None
ports:
- name: http-metrics
port: {{ .Values.kubeControllerManager.service.port }}
protocol: TCP
targetPort: {{ .Values.kubeControllerManager.service.targetPort }}
{{- if not .Values.kubeControllerManager.endpoints }}
selector:
{{- if .Values.kubeControllerManager.service.selector }}
{{ toYaml .Values.kubeControllerManager.service.selector | indent 4 }}
{{- else}}
component: kube-controller-manager
{{- end}}
{{- end }}
type: ClusterIP
{{- end }}

@ -1,40 +0,0 @@
{{- if .Values.kubeControllerManager.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "kube-prometheus.fullname" . }}-kube-controller-manager
namespace: {{ .Release.Namespace }}
labels: {{- include "kube-prometheus.labels" . | nindent 4 }}
app.kubernetes.io/component: {{ template "kube-prometheus.fullname" . }}-kube-controller-manager
spec:
jobLabel: component
selector:
matchLabels:
app.kubernetes.io/component: {{ template "kube-prometheus.fullname" . }}-kube-controller-manager
namespaceSelector:
matchNames:
- {{ .Values.kubeControllerManager.namespace }}
endpoints:
- port: http-metrics
{{- if .Values.kubeControllerManager.serviceMonitor.interval }}
interval: {{ .Values.kubeControllerManager.serviceMonitor.interval }}
{{- end }}
bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
{{- if .Values.kubeControllerManager.serviceMonitor.https }}
scheme: https
tlsConfig:
caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
{{- if .Values.kubeControllerManager.serviceMonitor.insecureSkipVerify }}
insecureSkipVerify: {{ .Values.kubeControllerManager.serviceMonitor.insecureSkipVerify }}
{{- end }}
{{- if .Values.kubeControllerManager.serviceMonitor.serverName }}
serverName: {{ .Values.kubeControllerManager.serviceMonitor.serverName }}
{{- end }}
{{- end }}
{{- if .Values.kubeControllerManager.serviceMonitor.metricRelabelings }}
metricRelabelings: {{- include "tc.v1.common.tplvalues.render" (dict "value" .Values.kubeControllerManager.serviceMonitor.metricRelabelings "context" $) | nindent 6 }}
{{- end }}
{{- if .Values.kubeControllerManager.serviceMonitor.relabelings }}
relabelings: {{- include "tc.v1.common.tplvalues.render" (dict "value" .Values.kubeControllerManager.serviceMonitor.relabelings "context" $) | nindent 6 }}
{{- end }}
{{- end }}

@ -1,18 +0,0 @@
{{- if and .Values.kubeScheduler.enabled .Values.kubeScheduler.endpoints }}
apiVersion: v1
kind: Endpoints
metadata:
name: {{ template "kube-prometheus.fullname" . }}-kube-scheduler
namespace: {{ .Values.kubeScheduler.namespace }}
labels: {{- include "kube-prometheus.labels" . | nindent 4 }}
app.kubernetes.io/component: kube-scheduler
subsets:
- addresses:
{{- range .Values.kubeScheduler.endpoints }}
- ip: {{ . }}
{{- end }}
ports:
- name: http-metrics
port: {{ .Values.kubeScheduler.service.port }}
protocol: TCP
{{- end }}

@ -1,25 +0,0 @@
{{- if and .Values.kubeScheduler.enabled .Values.kubeScheduler.service.enabled }}
apiVersion: v1
kind: Service
metadata:
name: {{ template "kube-prometheus.fullname" . }}-kube-scheduler
namespace: {{ .Values.kubeScheduler.namespace }}
labels: {{- include "kube-prometheus.labels" . | nindent 4 }}
app.kubernetes.io/component: {{ template "kube-prometheus.fullname" . }}-kube-scheduler
spec:
clusterIP: None
ports:
- name: http-metrics
port: {{ .Values.kubeScheduler.service.port }}
protocol: TCP
targetPort: {{ .Values.kubeScheduler.service.targetPort }}
{{- if not .Values.kubeScheduler.endpoints }}
selector:
{{- if .Values.kubeScheduler.service.selector }}
{{ toYaml .Values.kubeScheduler.service.selector | indent 4 }}
{{- else}}
component: kube-scheduler
{{- end}}
{{- end }}
type: ClusterIP
{{- end -}}

@ -1,40 +0,0 @@
{{- if .Values.kubeScheduler.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "kube-prometheus.fullname" . }}-kube-scheduler
namespace: {{ .Release.Namespace }}
labels: {{- include "kube-prometheus.labels" . | nindent 4 }}
app.kubernetes.io/component: {{ template "kube-prometheus.fullname" . }}-kube-scheduler
spec:
jobLabel: component
selector:
matchLabels:
app.kubernetes.io/component: {{ template "kube-prometheus.fullname" . }}-kube-scheduler
namespaceSelector:
matchNames:
- {{ .Values.kubeScheduler.namespace }}
endpoints:
- port: http-metrics
{{- if .Values.kubeScheduler.serviceMonitor.interval }}
interval: {{ .Values.kubeScheduler.serviceMonitor.interval }}
{{- end }}
bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
{{- if .Values.kubeScheduler.serviceMonitor.https }}
scheme: https
tlsConfig:
caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
{{- if .Values.kubeScheduler.serviceMonitor.insecureSkipVerify }}
insecureSkipVerify: {{ .Values.kubeScheduler.serviceMonitor.insecureSkipVerify }}
{{- end}}
{{- if .Values.kubeScheduler.serviceMonitor.serverName }}
serverName: {{ .Values.kubeScheduler.serviceMonitor.serverName }}
{{- end}}
{{- end}}
{{- if .Values.kubeScheduler.serviceMonitor.metricRelabelings }}
metricRelabelings: {{- include "tc.v1.common.tplvalues.render" (dict "value" .Values.kubeScheduler.serviceMonitor.metricRelabelings "context" $) | nindent 6 }}
{{- end }}
{{- if .Values.kubeScheduler.serviceMonitor.relabelings }}
relabelings: {{- include "tc.v1.common.tplvalues.render" (dict "value" .Values.kubeScheduler.serviceMonitor.relabelings "context" $) | nindent 6 }}
{{- end }}
{{- end }}

@ -1,85 +0,0 @@
{{- if .Values.kubelet.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "kube-prometheus.fullname" . }}-kubelet
namespace: {{ .Release.Namespace }}
labels: {{- include "kube-prometheus.labels" . | nindent 4 }}
app.kubernetes.io/component: kubelet
spec:
jobLabel: k8s-app
selector:
matchLabels:
k8s-app: kubelet
namespaceSelector:
matchNames:
- {{ .Values.kubelet.namespace }}
endpoints:
{{- if .Values.kubelet.serviceMonitor.https }}
- port: https-metrics
scheme: https
tlsConfig:
caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
serverName: kubernetes
insecureSkipVerify: true
bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
honorLabels: true
{{- if .Values.kubelet.serviceMonitor.interval }}
interval: {{ .Values.kubelet.serviceMonitor.interval }}
{{- end }}
{{- if .Values.kubelet.serviceMonitor.metricRelabelings }}
metricRelabelings: {{- include "tc.v1.common.tplvalues.render" ( dict "value" .Values.kubelet.serviceMonitor.metricRelabelings "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.kubelet.serviceMonitor.relabelings }}
relabelings: {{- toYaml .Values.kubelet.serviceMonitor.relabelings | nindent 8 }}
{{- end }}
- port: https-metrics
path: /metrics/cadvisor
scheme: https
tlsConfig:
caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
serverName: kubernetes
insecureSkipVerify: true
bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
honorLabels: true
{{- if .Values.kubelet.serviceMonitor.interval }}
interval: {{ .Values.kubelet.serviceMonitor.interval }}
{{- end }}
{{- if .Values.kubelet.serviceMonitor.cAdvisorMetricRelabelings }}
metricRelabelings: {{- include "tc.v1.common.tplvalues.render" ( dict "value" .Values.kubelet.serviceMonitor.cAdvisorMetricRelabelings "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.kubelet.serviceMonitor.cAdvisorRelabelings }}
relabelings: {{- toYaml .Values.kubelet.serviceMonitor.cAdvisorRelabelings | nindent 8 }}
{{- end }}
{{- else }}
- port: http-metrics
scheme: http
tlsConfig:
insecureSkipVerify: false
honorLabels: true
{{- if .Values.kubelet.serviceMonitor.interval }}
interval: {{ .Values.kubelet.serviceMonitor.interval }}
{{- end }}
{{- if .Values.kubelet.serviceMonitor.metricRelabelings }}
metricRelabelings: {{- include "tc.v1.common.tplvalues.render" ( dict "value" .Values.kubelet.serviceMonitor.metricRelabelings "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.kubelet.serviceMonitor.relabelings }}
relabelings: {{- toYaml .Values.kubelet.serviceMonitor.relabelings | nindent 8 }}
{{- end }}
- port: http-metrics
path: /metrics/cadvisor
scheme: http
tlsConfig:
insecureSkipVerify: false
honorLabels: true
{{- if .Values.kubelet.serviceMonitor.interval }}
interval: {{ .Values.kubelet.serviceMonitor.interval }}
{{- end }}
{{- if .Values.kubelet.serviceMonitor.cAdvisorMetricRelabelings }}
metricRelabelings: {{- include "tc.v1.common.tplvalues.render" ( dict "value" .Values.kubelet.serviceMonitor.cAdvisorMetricRelabelings "context" $) | nindent 8 }}
{{- end }}
{{- if .Values.kubelet.serviceMonitor.cAdvisorRelabelings }}
relabelings: {{- toYaml .Values.kubelet.serviceMonitor.cAdvisorRelabelings | nindent 8 }}
{{- end }}
{{- end }}
{{- end }}
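This hand-rolled kubelet ServiceMonitor, like the other component templates deleted above, is superseded by the equivalent scraper shipped with the kube-prometheus-stack subchart, which the values.yaml hunk below switches on:

kps:
  kubelet:
    enabled: true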

@ -54,6 +54,125 @@ grafana:
scrapeInterval: "30s"
uid: "prometheus"
kps:
## Install Prometheus Operator CRDs
##
crds:
enabled: false
## Manages Prometheus and Alertmanager components
##
prometheusOperator:
enabled: false
##
global:
rbac:
create: true
## Create default rules for monitoring the cluster
##
defaultRules:
create: true
windowsMonitoring:
## Deploys the windows-exporter and Windows-specific dashboards and rules (job name must be 'windows-exporter')
enabled: false
## Configuration for prometheus-windows-exporter
## ref: https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-windows-exporter
##
prometheus-windows-exporter:
## Enable ServiceMonitor and set Kubernetes label to use as a job label
##
prometheus:
monitor:
enabled: false
## Configuration for alertmanager
## ref: https://prometheus.io/docs/alerting/alertmanager/
##
alertmanager:
## Deploy alertmanager
##
enabled: false
## Using default values from https://github.com/grafana/helm-charts/blob/main/charts/grafana/values.yaml
##
grafana:
enabled: false
forceDeployDashboards: true
defaultDashboardsEnabled: true
## Flag to disable all the kubernetes component scrapers
##
kubernetesServiceMonitors:
enabled: true
## Component scraping the kube api server
##
kubeApiServer:
enabled: true
## Component scraping the kubelet and kubelet-hosted cAdvisor
##
kubelet:
enabled: true
## Component scraping the kube controller manager
##
kubeControllerManager:
enabled: true
## Component scraping coreDns. Use either this or kubeDns
##
coreDns:
enabled: true
## Component scraping kubeDns. Use either this or coreDns
##
kubeDns:
enabled: false
## Component scraping etcd
##
kubeEtcd:
enabled: true
## Component scraping kube scheduler
##
kubeScheduler:
enabled: true
## Component scraping kube proxy
##
kubeProxy:
enabled: false
## Component scraping kube state metrics
##
kubeStateMetrics:
enabled: true
## Deploy node exporter as a daemonset to all nodes
##
nodeExporter:
enabled: true
## Don't deploy a Prometheus instance
##
prometheus:
enabled: false
## Configuration for thanosRuler
## ref: https://thanos.io/tip/components/rule.md/
##
thanosRuler:
## Don't deploy thanosRuler
##
enabled: false
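Since every key under kps flows through to the subchart, individual scrapers can be re-toggled from a user values file without patching the chart itself; for example (a sketch reusing the keys above):

kps:
  kubeEtcd:
    enabled: false        # e.g. when etcd metrics are collected elsewhere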
service:
main:
selectorLabels:
@ -205,8 +324,10 @@ serviceAccount:
main:
enabled: true
primary: true
securityContext:
readOnlyRootFilesystem: false
probes:
# -- Liveness probe configuration
# @default -- See below
@ -856,6 +977,7 @@ prometheus:
## @param prometheus.portName Port name used for the pods and governing service. This defaults to web
##
portName: main
####
## Alert Manager Config
####
@ -1133,245 +1255,7 @@ alertmanager:
## @param alertmanager.configSelector Namespaces to be selected for AlertmanagerConfig discovery. If nil, only check own namespace. This defaults to {}
##
configSelector: {}
####
## Exporters
####
## @section Exporters
## Exporters
##
exporters:
node-exporter:
## @param exporters.node-exporter.enabled Enable node-exporter
##
enabled: true
kube-state-metrics:
## @param exporters.kube-state-metrics.enabled Enable kube-state-metrics
##
enabled: true
## @param kube-state-metrics [object] kube-state-metrics deployment configuration
##
kube-state-metrics:
serviceMonitor:
enabled: true
honorLabels: true
## Component scraping for kubelet and kubelet hosted cAdvisor
##
kubelet:
## @param kubelet.enabled Create a ServiceMonitor to scrape kubelet service
##
enabled: true
## @param kubelet.namespace Namespace where kubelet service is deployed. Related configuration `operator.kubeletService.namespace`
##
namespace: kube-system
serviceMonitor:
## @param kubelet.serviceMonitor.https Enable scraping of the kubelet over HTTPS
##
https: true
## @param kubelet.serviceMonitor.interval Scrape interval (if not set, falls back to Prometheus' default)
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint
##
interval: ""
## @param kubelet.serviceMonitor.metricRelabelings Metric relabeling
## ref: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs
##
metricRelabelings: []
## @param kubelet.serviceMonitor.relabelings Relabel configs
## ref: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
##
relabelings: []
## @param kubelet.serviceMonitor.cAdvisorMetricRelabelings Metric relabeling for scraping cAdvisor
## ref: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs
##
cAdvisorMetricRelabelings: []
## @param kubelet.serviceMonitor.cAdvisorRelabelings Relabel configs for scraping cAdvisor
## ref: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs
##
cAdvisorRelabelings: []
## Component scraping the kube-apiserver
##
kubeApiServer:
## @param kubeApiServer.enabled Create a ServiceMonitor to scrape kube-apiserver service
##
enabled: true
serviceMonitor:
## @param kubeApiServer.serviceMonitor.interval Scrape interval. If not set, the Prometheus default scrape interval is used.
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint
##
interval: ""
## @param kubeApiServer.serviceMonitor.metricRelabelings Metric relabeling
## ref: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs
##
metricRelabelings: []
## @param kubeApiServer.serviceMonitor.relabelings Relabel configs
## ref: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
##
relabelings: []
## Component scraping the kube-controller-manager
##
kubeControllerManager:
## @param kubeControllerManager.enabled Create a ServiceMonitor to scrape kube-controller-manager service
##
enabled: false
## @param kubeControllerManager.endpoints If your kube controller manager is not deployed as a pod, specify IPs it can be found on
## endpoints:
## - 10.141.4.22
## - 10.141.4.23
## - 10.141.4.24
##
endpoints: []
## @param kubeControllerManager.namespace Namespace where kube-controller-manager service is deployed.
##
namespace: kube-system
## Service ports and selector information
## @param kubeControllerManager.service.enabled Whether or not to create a Service object for kube-controller-manager
## @param kubeControllerManager.service.port Listening port of the kube-controller-manager Service object
## @param kubeControllerManager.service.targetPort Port to target on the kube-controller-manager Pods. This should be the port that kube-controller-manager is exposing metrics on
## @param kubeControllerManager.service.selector Optional PODs Label selector for the service
##
service:
enabled: true
port: 10252
targetPort: 10252
## selector:
## component: kube-controller-manager
##
selector: {}
serviceMonitor:
## @param kubeControllerManager.serviceMonitor.interval Scrape interval (if not set, falls back to Prometheus' default)
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint
##
interval: ""
## @param kubeControllerManager.serviceMonitor.https Enable scraping kube-controller-manager over https
## Requires proper certs (not self-signed) and delegated authentication/authorization checks
##
https: false
## @param kubeControllerManager.serviceMonitor.insecureSkipVerify Skip TLS certificate validation when scraping
##
insecureSkipVerify: ""
## @param kubeControllerManager.serviceMonitor.serverName Name of the server to use when validating TLS certificate
serverName: ""
## @param kubeControllerManager.serviceMonitor.metricRelabelings Metric relabeling
## ref: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs
##
metricRelabelings: []
## @param kubeControllerManager.serviceMonitor.relabelings Relabel configs
## ref: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
##
relabelings: []
## Component scraping kube scheduler
##
kubeScheduler:
## @param kubeScheduler.enabled Create a ServiceMonitor to scrape kube-scheduler service
##
enabled: false
## @param kubeScheduler.endpoints If your kube scheduler is not deployed as a pod, specify IPs it can be found on
## endpoints:
## - 10.141.4.22
## - 10.141.4.23
## - 10.141.4.24
##
endpoints: []
## @param kubeScheduler.namespace Namespace where kube-scheduler service is deployed.
##
namespace: kube-system
## If using kubeScheduler.endpoints only the port and targetPort are used
## @param kubeScheduler.service.enabled Whether or not to create a Service object for kube-scheduler
## @param kubeScheduler.service.port Listening port of the kube scheduler Service object
## @param kubeScheduler.service.targetPort Port to target on the kube scheduler Pods. This should be the port that kube scheduler is exposing metrics on
## @param kubeScheduler.service.selector Optional PODs Label selector for the service
##
service:
enabled: true
port: 10251
targetPort: 10251
## selector:
## component: kube-scheduler
##
selector: {}
serviceMonitor:
## @param kubeScheduler.serviceMonitor.interval Scrape interval (if not set, falls back to Prometheus' default)
##
interval: ""
## @param kubeScheduler.serviceMonitor.https Enable scraping kube-scheduler over https
## Requires proper certs (not self-signed) and delegated authentication/authorization checks
##
https: false
## @param kubeScheduler.serviceMonitor.insecureSkipVerify Skip TLS certificate validation when scraping
##
insecureSkipVerify: ""
## @param kubeScheduler.serviceMonitor.serverName Name of the server to use when validating TLS certificate
##
serverName: ""
## @param kubeScheduler.serviceMonitor.metricRelabelings Metric relabeling
## metricRelabelings:
## - action: keep
## regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
## sourceLabels: [__name__]
##
metricRelabelings: []
## @param kubeScheduler.serviceMonitor.relabelings Relabel configs
## relabelings:
## - sourceLabels: [__meta_kubernetes_pod_node_name]
## separator: ;
## regex: ^(.*)$
## targetLabel: nodename
## replacement: $1
## action: replace
##
relabelings: []
## Component scraping coreDns
##
coreDns:
## @param coreDns.enabled Create a ServiceMonitor to scrape coredns service
##
enabled: true
## @param coreDns.namespace Namespace where core dns service is deployed.
##
namespace: kube-system
## Create a ServiceMonitor to scrape coredns service
## @param coreDns.service.enabled Whether or not to create a Service object for coredns
## @param coreDns.service.port Listening port of the coredns Service object
## @param coreDns.service.targetPort Port to target on the coredns Pods. This should be the port that coredns is exposing metrics on
## @param coreDns.service.selector Optional PODs Label selector for the service
##
service:
enabled: true
port: 9153
targetPort: 9153
## selector:
## component: kube-dns
##
selector: {}
serviceMonitor:
## @param coreDns.serviceMonitor.interval Scrape interval. If not set, the Prometheus default scrape interval is used.
##
interval: ""
## @param coreDns.serviceMonitor.metricRelabelings Metric relabel configs to apply to samples before ingestion.
## metricRelabelings:
## - action: keep
## regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
## sourceLabels: [__name__]
##
metricRelabelings: []
## @param coreDns.serviceMonitor.relabelings Relabel configs to apply to samples before ingestion.
## relabelings:
## - sourceLabels: [__meta_kubernetes_pod_node_name]
## separator: ;
## regex: ^(.*)$
## targetLabel: nodename
## replacement: $1
## action: replace
##
relabelings: []
## Component scraping the kube-proxy
##
kubeProxy:
## @param kubeProxy.enabled Create a ServiceMonitor to scrape the kube-proxy Service
##
enabled: false
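The removed exporters.* switches have direct equivalents under the new kps block earlier in this file; overrides migrate one-to-one, along these lines:

# before (keys removed in this commit)
# exporters:
#   node-exporter:
#     enabled: true
#   kube-state-metrics:
#     enabled: true

# after
kps:
  nodeExporter:
    enabled: true
  kubeStateMetrics:
    enabled: true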
portal:
open:
enabled: true
updated: true