feat(prometheus): add some prometheus exporter-Apps (#1388)

* feat(prometheus): add some prometheus exporter-Apps

* delete notes

* little fix

* fix

* crlf and ignore

* keywords

* hmm

* no quote try

* portals

* skip tests for promcord

* hmm

* Add questions
This commit is contained in:
Kjeld Schouten-Lebbing 2021-11-23 21:36:27 +01:00 committed by GitHub
parent 0c054b8cbd
commit 19ef571469
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
36 changed files with 1398 additions and 1 deletions

View File

@ -7,7 +7,7 @@ excluded-charts:
- charts/incubator/amcrest2mqtt
- charts/library/common
#- foundryvtt
#- promcord
- promcord
- charts/incubator/zigbee2mqtt
chart-repos:
- truecharts=https://truecharts.org

View File

@ -0,0 +1,26 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
# OWNERS file for Kubernetes
OWNERS
# helm-docs templates
*.gotmpl

View File

@ -0,0 +1,29 @@
apiVersion: v2
appVersion: "0.52.1"
dependencies:
- name: common
repository: https://truecharts.org
version: 8.9.3
deprecated: false
description: Discord bot that provides metrics from a Discord server
icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png
home: https://github.com/k8s-at-home/charts/tree/master/charts/stable/promcord
keywords:
- promcord
- discord
- metrics
kubeVersion: '>=1.16.0-0'
maintainers:
- email: info@truecharts.org
name: TrueCharts
url: https://truecharts.org
name: promcord
sources:
- https://github.com/nimarion/promcord
type: application
version: 0.0.1
annotations:
truecharts.org/catagories: |
- metrics
truecharts.org/SCALE-support: "true"
truecharts.org/grade: U

View File

@ -0,0 +1,14 @@
image:
repository: traefik/whoami
pullPolicy: IfNotPresent
tag: v1.6.1@sha256:2c52bb2c848038a33e40415c300b655d7976bafaf033ecf4a6679cb9e1715917
service:
main:
ports:
main:
port: 8080
args:
- --port
- '8080'

View File

@ -0,0 +1,127 @@
# Include{groups}
portals: {}
questions:
# Include{global}
- variable: env
group: "Container Configuration"
label: "Image Environment"
schema:
type: dict
attrs:
# Include{fixedEnv}
# Include{containerConfig}
- variable: serviceexpert
group: "Networking and Services"
label: "Show Expert Config"
schema:
type: boolean
default: false
show_subquestions_if: true
subquestions:
- variable: hostNetwork
group: "Networking and Services"
label: "Host-Networking (Complicated)"
schema:
type: boolean
default: false
# Include{serviceExpert}
# Include{serviceList}
# Include{persistenceList}
# Include{ingressList}
- variable: advancedSecurity
label: "Show Advanced Security Settings"
group: "Security and Permissions"
schema:
type: boolean
default: false
show_subquestions_if: true
subquestions:
- variable: securityContext
label: "Security Context"
schema:
type: dict
attrs:
- variable: privileged
label: "Privileged mode"
schema:
type: boolean
default: false
- variable: readOnlyRootFilesystem
label: "ReadOnly Root Filesystem"
schema:
type: boolean
default: false
- variable: allowPrivilegeEscalation
label: "Allow Privilege Escalation"
schema:
type: boolean
default: false
- variable: runAsNonRoot
label: "runAsNonRoot"
schema:
type: boolean
default: true
- variable: podSecurityContext
group: "Security and Permissions"
label: "Pod Security Context"
schema:
type: dict
attrs:
- variable: runAsUser
label: "runAsUser"
description: "The UserID of the user running the application"
schema:
type: int
default: 568
- variable: runAsGroup
label: "runAsGroup"
# Fixed: original had a missing opening quote and garbled wording
# ('The groupID this App of the user running the application"').
description: "The groupID of the user running the application"
schema:
type: int
default: 568
- variable: fsGroup
label: "fsGroup"
description: "The group that should own ALL storage."
schema:
type: int
default: 568
- variable: supplementalGroups
label: "supplemental Groups"
schema:
type: list
default: []
items:
- variable: supplementalGroupsEntry
label: "supplemental Group"
schema:
type: int
- variable: fsGroupChangePolicy
label: "When should we take ownership?"
schema:
type: string
default: "OnRootMismatch"
enum:
- value: "OnRootMismatch"
description: "OnRootMismatch"
- value: "Always"
description: "Always"
# Include{resources}
# Include{metrics}
# Include{prometheusRule}
# Include{advanced}
# Include{addons}

View File

@ -0,0 +1,2 @@
{{/* Render the templates */}}
{{ include "common.all" . }}

View File

@ -0,0 +1,27 @@
{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: {{ include "common.names.fullname" . }}
labels:
{{- include "common.labels" . | nindent 4 }}
{{- with .Values.metrics.prometheusRule.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
groups:
- name: {{ include "common.names.fullname" . }}
rules:
- alert: PromcordAbsent
annotations:
description: Promcord has disappeared from Prometheus service discovery.
summary: Promcord is down.
expr: |
absent(up{job=~".*{{ include "common.names.fullname" . }}.*"} == 1)
for: 5m
labels:
severity: critical
{{- with .Values.metrics.prometheusRule.rules }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,24 @@
{{- if .Values.metrics.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ include "common.names.fullname" . }}
labels:
{{- include "common.labels" . | nindent 4 }}
{{- with .Values.metrics.serviceMonitor.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
selector:
matchLabels:
{{- include "common.labels.selectorLabels" . | nindent 6 }}
endpoints:
- port: metrics
{{- with .Values.metrics.serviceMonitor.interval }}
interval: {{ . }}
{{- end }}
{{- with .Values.metrics.serviceMonitor.scrapeTimeout }}
scrapeTimeout: {{ . }}
{{- end }}
path: /metrics
{{- end }}

View File

@ -0,0 +1,50 @@
image:
repository: biospheere/promcord
tag: latest
pullPolicy: IfNotPresent
# -- Configures service settings for the chart.
# @default -- See values.yaml
service:
main:
ports:
main:
enabled: false
metrics:
enabled: true
protocol: TCP
port: 8080
# -- environment variables. See [application docs](https://github.com/nimarion/promcord/blob/master/README.md) for more details.
# @default -- See below
env:
# -- Set the container timezone
TZ: UTC
# -- Discord bot token
# DISCORD_TOKEN:
metrics:
# -- Enable and configure a Prometheus serviceMonitor for the chart under this key.
# @default -- See values.yaml
enabled: false
serviceMonitor:
interval: 1m
scrapeTimeout: 30s
labels: {}
# -- Enable and configure Prometheus Rules for the chart under this key.
# @default -- See values.yaml
prometheusRule:
enabled: false
labels: {}
# -- Configure additional rules for the chart under this key.
# @default -- See prometheusrules.yaml
rules: []
# - alert: PromcordAbsent
# annotations:
# description: Promcord has disappeared from Prometheus service discovery.
# summary: Promcord is disabled.
# expr: |
# absent(up{job=~".*promcord.*"} == 1)
# for: 15m
# labels:
# severity: critical

View File

@ -0,0 +1,26 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
# OWNERS file for Kubernetes
OWNERS
# helm-docs templates
*.gotmpl

View File

@ -0,0 +1,29 @@
apiVersion: v2
appVersion: "0.52.1"
dependencies:
- name: common
repository: https://truecharts.org
version: 8.9.3
deprecated: false
description: Speedtest Exporter made in python using the official speedtest bin
icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png
home: https://github.com/k8s-at-home/charts/tree/master/charts/stable/speedtest-exporter
keywords:
- speedtest-exporter
- speedtest
- metrics
kubeVersion: '>=1.16.0-0'
maintainers:
- email: info@truecharts.org
name: TrueCharts
url: https://truecharts.org
name: speedtest-exporter
sources:
- https://github.com/MiguelNdeCarvalho/speedtest-exporter/
type: application
version: 0.0.1
annotations:
truecharts.org/catagories: |
- metrics
truecharts.org/SCALE-support: "true"
truecharts.org/grade: U

View File

@ -0,0 +1,151 @@
# Include{groups}
portals: {}
questions:
# Include{global}
- variable: env
group: "Container Configuration"
label: "Image Environment"
schema:
type: dict
attrs:
# Include{fixedEnv}
# Include{containerConfig}
- variable: serviceexpert
group: "Networking and Services"
label: "Show Expert Config"
schema:
type: boolean
default: false
show_subquestions_if: true
subquestions:
- variable: hostNetwork
group: "Networking and Services"
label: "Host-Networking (Complicated)"
schema:
type: boolean
default: false
# Include{serviceExpert}
# Include{serviceList}
# Include{persistenceList}
# Include{ingressList}
- variable: advancedSecurity
label: "Show Advanced Security Settings"
group: "Security and Permissions"
schema:
type: boolean
default: false
show_subquestions_if: true
subquestions:
- variable: securityContext
label: "Security Context"
schema:
type: dict
attrs:
- variable: privileged
label: "Privileged mode"
schema:
type: boolean
default: false
- variable: readOnlyRootFilesystem
label: "ReadOnly Root Filesystem"
schema:
type: boolean
default: false
- variable: allowPrivilegeEscalation
label: "Allow Privilege Escalation"
schema:
type: boolean
default: false
- variable: runAsNonRoot
label: "runAsNonRoot"
schema:
type: boolean
default: true
- variable: podSecurityContext
group: "Security and Permissions"
label: "Pod Security Context"
schema:
type: dict
attrs:
- variable: runAsUser
label: "runAsUser"
description: "The UserID of the user running the application"
schema:
type: int
default: 568
- variable: runAsGroup
label: "runAsGroup"
# Fixed: original had a missing opening quote and garbled wording
# ('The groupID this App of the user running the application"').
description: "The groupID of the user running the application"
schema:
type: int
default: 568
- variable: fsGroup
label: "fsGroup"
description: "The group that should own ALL storage."
schema:
type: int
default: 568
- variable: supplementalGroups
label: "supplemental Groups"
schema:
type: list
default: []
items:
- variable: supplementalGroupsEntry
label: "supplemental Group"
schema:
type: int
- variable: fsGroupChangePolicy
label: "When should we take ownership?"
schema:
type: string
default: "OnRootMismatch"
enum:
- value: "OnRootMismatch"
description: "OnRootMismatch"
- value: "Always"
description: "Always"
# Include{resources}
# Include{metrics}
# Include{prometheusRule}
- variable: downloadLimit
label: "Download Limit"
description: "Download speed you want alerts to be triggered in Mbps"
schema:
type: int
default: 400
- variable: uploadLimit
label: "Upload Limit"
description: "Upload speed you want alerts to be triggered in Mbps"
schema:
type: int
default: 400
- variable: pingLimit
label: "Ping Limit"
description: "Ping latency you want alerts to be triggered in ms"
schema:
type: int
default: 10
- variable: jitterLimit
label: "Jitter Limit"
description: "Jitter latency you want alerts to be triggered in ms"
schema:
type: int
default: 30
# Include{advanced}
# Include{addons}

View File

@ -0,0 +1 @@
{{ include "common.all" . }}

View File

@ -0,0 +1,67 @@
{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: {{ include "common.names.fullname" . }}
labels:
{{- include "common.labels" . | nindent 4 }}
{{- with .Values.metrics.prometheusRule.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
groups:
- name: {{ include "common.names.fullname" . }}
rules:
- alert: SpeedtestExporterAbsent
annotations:
description: Speedtest Exporter has disappeared from Prometheus target discovery.
summary: Speedtest Exporter is down.
expr: |
absent(up{job=~".*{{ include "common.names.fullname" . }}.*"} == 1)
for: {{ trimAll "m" .Values.metrics.serviceMonitor.interval | add 15 }}m
labels:
severity: critical
- alert: SpeedtestSlowInternetDownload
annotations:
description: Internet download speed is averaging {{ "{{ humanize $value }}" }} Mbps.
summary: SpeedTest slow internet download.
expr: |
avg_over_time(speedtest_download_bits_per_second{job=~".*{{ include "common.names.fullname" . }}.*"}[4h])
< {{ .Values.metrics.prometheusRule.downloadLimit }}
for: 0m
labels:
severity: warning
- alert: SpeedtestSlowInternetUpload
annotations:
description: Internet upload speed is averaging {{ "{{ humanize $value }}" }} Mbps.
summary: SpeedTest slow internet upload.
expr: |
avg_over_time(speedtest_upload_bits_per_second{job=~".*{{ include "common.names.fullname" . }}.*"}[4h])
< {{ .Values.metrics.prometheusRule.uploadLimit }}
for: 0m
labels:
severity: warning
- alert: SpeedtestHighPingLatency
annotations:
description: Internet ping latency is averaging {{ "{{ humanize $value }}" }} ms.
summary: SpeedTest high ping latency.
expr: |
avg_over_time(speedtest_ping_latency_milliseconds{job=~".*{{ include "common.names.fullname" . }}.*"}[4h])
> {{ .Values.metrics.prometheusRule.pingLimit }}
for: 0m
labels:
severity: warning
- alert: SpeedtestHighJitterLatency
annotations:
description: Internet jitter latency is averaging {{ "{{ humanize $value }}" }} ms.
summary: SpeedTest high jitter latency.
expr: |
avg_over_time(speedtest_jitter_latency_milliseconds{job=~".*{{ include "common.names.fullname" . }}.*"}[4h])
> {{ .Values.metrics.prometheusRule.jitterLimit }}
for: 0m
labels:
severity: warning
{{- with .Values.metrics.prometheusRule.rules }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,24 @@
{{- if .Values.metrics.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ include "common.names.fullname" . }}
labels:
{{- include "common.labels" . | nindent 4 }}
{{- with .Values.metrics.serviceMonitor.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
selector:
matchLabels:
{{- include "common.labels.selectorLabels" . | nindent 6 }}
endpoints:
- port: metrics
{{- with .Values.metrics.serviceMonitor.interval }}
interval: {{ . }}
{{- end }}
{{- with .Values.metrics.serviceMonitor.scrapeTimeout }}
scrapeTimeout: {{ . }}
{{- end }}
path: /metrics
{{- end }}

View File

@ -0,0 +1,61 @@
image:
# -- image repository
repository: ghcr.io/miguelndecarvalho/speedtest-exporter
# -- image tag
tag: v3.2.2
# -- image pull policy
pullPolicy: IfNotPresent
# -- environment variables. See [application docs](https://docs.miguelndecarvalho.pt/projects/speedtest-exporter/) for more details.
# @default -- See below
env:
# -- Set the container timezone
TZ: UTC
# -- Configures service settings for the chart.
# @default -- See values.yaml
service:
main:
ports:
main:
enabled: false
metrics:
enabled: true
protocol: TCP
port: 9798
metrics:
# -- Enable and configure a Prometheus serviceMonitor for the chart under this key.
# @default -- See values.yaml
enabled: false
serviceMonitor:
# -- The interval field must use minutes for the padding to calculate properly.
interval: 60m
scrapeTimeout: 1m
labels: {}
# -- Enable and configure Prometheus Rules for the chart under this key.
# @default -- See values.yaml
prometheusRule:
enabled: false
# -- Download speed you want alerts to be triggered in Mbps
downloadLimit: 400
# -- Upload speed you want alerts to be triggered in Mbps
uploadLimit: 400
# -- Ping latency you want alerts to be triggered in ms
pingLimit: 10
# -- Jitter latency you want alerts to be triggered in ms
jitterLimit: 30
labels: {}
# -- Configure additional rules for the chart under this key.
# @default -- See prometheusrules.yaml
rules: []
# - alert: SpeedtestSlowInternetDownload
# annotations:
# description: Internet download speed is averaging {{ "{{ humanize $value }}" }} Mbps.
# summary: SpeedTest slow internet download.
# expr: |
# avg_over_time(speedtest_download_bits_per_second{job=~".*{{ include "common.names.fullname" . }}.*"}[4h])
# < {{ .Values.metrics.prometheusRule.downloadLimit }}
# for: 0m
# labels:
# severity: warning

View File

@ -0,0 +1,26 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
# OWNERS file for Kubernetes
OWNERS
# helm-docs templates
*.gotmpl

View File

@ -0,0 +1,30 @@
apiVersion: v2
appVersion: "0.52.1"
dependencies:
- name: common
repository: https://truecharts.org
version: 8.9.3
deprecated: false
description: Collect ALL UniFi Controller, Site, Device & Client Data - Export to InfluxDB or Prometheus
icon: https://raw.githubusercontent.com/wiki/unifi-poller/unifi-poller/images/unifi-poller-logo.png
home: https://github.com/k8s-at-home/charts/tree/master/charts/stable/unifi-poller
keywords:
- unifi
- unifi-poller
- metrics
kubeVersion: '>=1.16.0-0'
maintainers:
- email: info@truecharts.org
name: TrueCharts
url: https://truecharts.org
name: unifi-poller
sources:
- https://github.com/unifi-poller/unifi-poller
- https://hub.docker.com/r/golift/unifi-poller
type: application
version: 0.0.1
annotations:
truecharts.org/catagories: |
- metrics
truecharts.org/SCALE-support: "true"
truecharts.org/grade: U

View File

@ -0,0 +1,127 @@
# Include{groups}
portals: {}
questions:
# Include{global}
- variable: env
group: "Container Configuration"
label: "Image Environment"
schema:
type: dict
attrs:
# Include{fixedEnv}
# Include{containerConfig}
- variable: serviceexpert
group: "Networking and Services"
label: "Show Expert Config"
schema:
type: boolean
default: false
show_subquestions_if: true
subquestions:
- variable: hostNetwork
group: "Networking and Services"
label: "Host-Networking (Complicated)"
schema:
type: boolean
default: false
# Include{serviceExpert}
# Include{serviceList}
# Include{persistenceList}
# Include{ingressList}
- variable: advancedSecurity
label: "Show Advanced Security Settings"
group: "Security and Permissions"
schema:
type: boolean
default: false
show_subquestions_if: true
subquestions:
- variable: securityContext
label: "Security Context"
schema:
type: dict
attrs:
- variable: privileged
label: "Privileged mode"
schema:
type: boolean
default: false
- variable: readOnlyRootFilesystem
label: "ReadOnly Root Filesystem"
schema:
type: boolean
default: false
- variable: allowPrivilegeEscalation
label: "Allow Privilege Escalation"
schema:
type: boolean
default: false
- variable: runAsNonRoot
label: "runAsNonRoot"
schema:
type: boolean
default: true
- variable: podSecurityContext
group: "Security and Permissions"
label: "Pod Security Context"
schema:
type: dict
attrs:
- variable: runAsUser
label: "runAsUser"
description: "The UserID of the user running the application"
schema:
type: int
default: 568
- variable: runAsGroup
label: "runAsGroup"
# Fixed: original had a missing opening quote and garbled wording
# ('The groupID this App of the user running the application"').
description: "The groupID of the user running the application"
schema:
type: int
default: 568
- variable: fsGroup
label: "fsGroup"
description: "The group that should own ALL storage."
schema:
type: int
default: 568
- variable: supplementalGroups
label: "supplemental Groups"
schema:
type: list
default: []
items:
- variable: supplementalGroupsEntry
label: "supplemental Group"
schema:
type: int
- variable: fsGroupChangePolicy
label: "When should we take ownership?"
schema:
type: string
default: "OnRootMismatch"
enum:
- value: "OnRootMismatch"
description: "OnRootMismatch"
- value: "Always"
description: "Always"
# Include{resources}
# Include{metrics}
# Include{prometheusRule}
# Include{advanced}
# Include{addons}

View File

@ -0,0 +1 @@
{{ include "common.all" . }}

View File

@ -0,0 +1,27 @@
{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: {{ include "common.names.fullname" . }}
labels:
{{- include "common.labels" . | nindent 4 }}
{{- with .Values.metrics.prometheusRule.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
groups:
- name: {{ include "common.names.fullname" . }}
rules:
- alert: UnifiPollerAbsent
annotations:
description: Unifi Poller has disappeared from Prometheus service discovery.
summary: Unifi Poller is down.
expr: |
absent(up{job=~".*{{ include "common.names.fullname" . }}.*"} == 1)
for: 5m
labels:
severity: critical
{{- with .Values.metrics.prometheusRule.rules }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,24 @@
{{- if .Values.metrics.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ include "common.names.fullname" . }}
labels:
{{- include "common.labels" . | nindent 4 }}
{{- with .Values.metrics.serviceMonitor.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
selector:
matchLabels:
{{- include "common.labels.selectorLabels" . | nindent 6 }}
endpoints:
- port: metrics
{{- with .Values.metrics.serviceMonitor.interval }}
interval: {{ . }}
{{- end }}
{{- with .Values.metrics.serviceMonitor.scrapeTimeout }}
scrapeTimeout: {{ . }}
{{- end }}
path: /metrics
{{- end }}

View File

@ -0,0 +1,78 @@
image:
# -- Image to deploy.
repository: golift/unifi-poller
# -- Image [k8s pull policy](https://kubernetes.io/docs/concepts/containers/images/#updating-images).
pullPolicy: IfNotPresent
# -- Image tag to deploy.
tag: 2.1.3
# -- Environment variable configuration options for unifi-poller ([docs](https://unifipoller.com/docs/install/configuration)).
# Note: a [configuration file](https://github.com/unifi-poller/unifi-poller/blob/master/examples/up.conf.example) is also supported.
env: {}
# TZ: UTC
# UP_UNIFI_DEFAULT_URL: "https://127.0.0.1:8443"
# UP_UNIFI_DEFAULT_USER: "unifipoller"
# UP_UNIFI_DEFAULT_PASS: "unifipoller"
# UP_PROMETHEUS_DISABLE: true
# UP_INFLUXDB_DISABLE: true
service:
main:
ports:
main:
enabled: false
metrics:
enabled: true
protocol: TCP
port: 9130
ingress:
main:
# -- Expose [unifi-poller's web interface](https://unifipoller.com/docs/advanced/webserver)
# (if enabled in the configuration) via the k8s ingress by setting this true.
enabled: false
influxdb:
# -- Create an InfluxDB instance as a [unifi-poller storage backend](https://unifipoller.com/docs/dependencies/influxdb).
# See [bitnami/influxdb](https://github.com/bitnami/charts/tree/master/bitnami/influxdb) for more options.
enabled: false
# -- InfluxDB cluster deployment architecture.
architecture: standalone
# -- Database name to automatically initialize.
# Be sure to match in unifi-poller's [influxdb config](https://unifipoller.com/docs/install/configuration#influxdb).
database: unifi
# -- Enable InfluxDB authentication, supported by unifi-poller.
# Be sure to match in unifi-poller's [influxdb config](https://unifipoller.com/docs/install/configuration#influxdb).
authEnabled: false
persistence:
# -- Enable persistence to store in a PV so data survives pod restarts.
enabled: false
# storageClass: ""
# size: 8Gi
metrics:
# -- Enable and configure a Prometheus serviceMonitor for the chart under this key.
# @default -- See values.yaml
enabled: false
serviceMonitor:
interval: 1m
scrapeTimeout: 30s
labels: {}
# -- Enable and configure Prometheus Rules for the chart under this key.
# @default -- See values.yaml
prometheusRule:
enabled: false
labels: {}
# -- Configure additional rules for the chart under this key.
# @default -- See prometheusrules.yaml
rules: []
# - alert: UnifiPollerAbsent
# annotations:
# description: Unifi Poller has disappeared from Prometheus service discovery.
# summary: Unifi Poller is down.
# expr: |
# absent(up{job=~".*unifi-poller.*"} == 1)
# for: 5m
# labels:
# severity: critical

View File

@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
# OWNERS file for Kubernetes
OWNERS

View File

@ -0,0 +1,31 @@
apiVersion: v2
appVersion: "0.52.1"
dependencies:
- name: common
repository: https://truecharts.org
version: 8.9.3
deprecated: false
description: Prometheus Exporter for the official uptimerobot CLI
icon: https://cdn.foliovision.com/images/2019/03/icon-uptimerobot-1024.png
home: https://github.com/k8s-at-home/charts/tree/master/charts/stable/uptimerobot-prometheus
keywords:
- uptimerobot
- prometheus
- grafana
- metrics
kubeVersion: '>=1.16.0-0'
maintainers:
- email: info@truecharts.org
name: TrueCharts
url: https://truecharts.org
name: uptimerobot-prometheus
sources:
- https://github.com/lekpamartin/uptimerobot_exporter
- https://github.com/k8s-at-home/charts/tree/master/charts/uptimerobot-prometheus
type: application
version: 0.0.1
annotations:
truecharts.org/catagories: |
- metrics
truecharts.org/SCALE-support: "true"
truecharts.org/grade: U

View File

@ -0,0 +1,127 @@
# Include{groups}
portals: {}
questions:
# Include{global}
- variable: env
group: "Container Configuration"
label: "Image Environment"
schema:
type: dict
attrs:
# Include{fixedEnv}
# Include{containerConfig}
- variable: serviceexpert
group: "Networking and Services"
label: "Show Expert Config"
schema:
type: boolean
default: false
show_subquestions_if: true
subquestions:
- variable: hostNetwork
group: "Networking and Services"
label: "Host-Networking (Complicated)"
schema:
type: boolean
default: false
# Include{serviceExpert}
# Include{serviceList}
# Include{persistenceList}
# Include{ingressList}
- variable: advancedSecurity
label: "Show Advanced Security Settings"
group: "Security and Permissions"
schema:
type: boolean
default: false
show_subquestions_if: true
subquestions:
- variable: securityContext
label: "Security Context"
schema:
type: dict
attrs:
- variable: privileged
label: "Privileged mode"
schema:
type: boolean
default: false
- variable: readOnlyRootFilesystem
label: "ReadOnly Root Filesystem"
schema:
type: boolean
default: false
- variable: allowPrivilegeEscalation
label: "Allow Privilege Escalation"
schema:
type: boolean
default: false
- variable: runAsNonRoot
label: "runAsNonRoot"
schema:
type: boolean
default: true
- variable: podSecurityContext
group: "Security and Permissions"
label: "Pod Security Context"
schema:
type: dict
attrs:
- variable: runAsUser
label: "runAsUser"
description: "The UserID of the user running the application"
schema:
type: int
default: 568
- variable: runAsGroup
label: "runAsGroup"
# Fixed: original had a missing opening quote and garbled wording
# ('The groupID this App of the user running the application"').
description: "The groupID of the user running the application"
schema:
type: int
default: 568
- variable: fsGroup
label: "fsGroup"
description: "The group that should own ALL storage."
schema:
type: int
default: 568
- variable: supplementalGroups
label: "supplemental Groups"
schema:
type: list
default: []
items:
- variable: supplementalGroupsEntry
label: "supplemental Group"
schema:
type: int
- variable: fsGroupChangePolicy
label: "When should we take ownership?"
schema:
type: string
default: "OnRootMismatch"
enum:
- value: "OnRootMismatch"
description: "OnRootMismatch"
- value: "Always"
description: "Always"
# Include{resources}
# Include{metrics}
# Include{prometheusRule}
# Include{advanced}
# Include{addons}

View File

@ -0,0 +1 @@
{{ include "common.all" . }}

View File

@ -0,0 +1,27 @@
{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: {{ include "common.names.fullname" . }}
labels:
{{- include "common.labels" . | nindent 4 }}
{{- with .Values.metrics.prometheusRule.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
groups:
- name: {{ include "common.names.fullname" . }}
rules:
- alert: UptimeRobotExporterAbsent
annotations:
description: Uptime Robot Exporter has disappeared from Prometheus service discovery.
summary: Uptime Robot Exporter is down.
expr: |
absent(up{job=~".*{{ include "common.names.fullname" . }}.*"} == 1)
for: 5m
labels:
severity: critical
{{- with .Values.metrics.prometheusRule.rules }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,24 @@
{{- if .Values.metrics.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ include "common.names.fullname" . }}
labels:
{{- include "common.labels" . | nindent 4 }}
{{- with .Values.metrics.serviceMonitor.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
selector:
matchLabels:
{{- include "common.labels.selectorLabels" . | nindent 6 }}
endpoints:
- port: metrics
{{- with .Values.metrics.serviceMonitor.interval }}
interval: {{ . }}
{{- end }}
{{- with .Values.metrics.serviceMonitor.scrapeTimeout }}
scrapeTimeout: {{ . }}
{{- end }}
path: /metrics
{{- end }}

View File

@ -0,0 +1,59 @@
image:
# -- image repository
repository: billimek/prometheus-uptimerobot-exporter
# -- image tag
tag: 0.0.1
# -- image pull policy
pullPolicy: IfNotPresent
# -- environment variables. See [application docs](https://github.com/lekpamartin/uptimerobot_exporter/blob/master/docker-compose.yml) for more details.
# @default -- See below
env:
# -- Set the container timezone
TZ: UTC
# -- Set the uptimerobot API key
UPTIMEROBOT_API_KEY: ""
# -- Configures service settings for the chart.
# @default -- See values.yaml
service:
main:
ports:
main:
enabled: false
metrics:
enabled: true
protocol: TCP
port: 9705
ingress:
# -- Enable and configure ingress settings for the chart under this key.
# @default -- See values.yaml
main:
enabled: false
metrics:
# -- Enable and configure a Prometheus serviceMonitor for the chart under this key.
# @default -- See values.yaml
enabled: false
serviceMonitor:
interval: 1m
scrapeTimeout: 30s
labels: {}
# -- Enable and configure Prometheus Rules for the chart under this key.
# @default -- See values.yaml
prometheusRule:
enabled: false
labels: {}
# -- Configure additional rules for the chart under this key.
# @default -- See prometheusrules.yaml
rules: []
# - alert: UptimeRobotExporterAbsent
# annotations:
# description: Uptime Robot Exporter has disappeared from Prometheus service discovery.
# summary: Uptime Robot Exporter is down.
# expr: |
# absent(up{job=~".*uptimerobot.*"} == 1)
# for: 5m
# labels:
# severity: critical

View File

@ -19,6 +19,8 @@ groups:
description: "Specify resources/devices to be allocated to workload"
- name: "Middlewares"
description: "Traefik Middlewares"
- name: "Metrics"
description: "Metrics"
- name: "Addons"
description: "Addon Configuration"
- name: "Advanced"

View File

@ -0,0 +1,33 @@
- variable: metrics
group: "Metrics"
label: "Prometheus Metrics"
schema:
type: dict
attrs:
- variable: enabled
label: "Enabled"
description: "Enable Prometheus Metrics"
schema:
type: boolean
default: true
show_subquestions_if: true
subquestions:
- variable: serviceMonitor
label: "Service Monitor Settings"
schema:
type: dict
attrs:
- variable: interval
label: "Scrape Interval"
description: "Scrape interval time"
schema:
type: string
default: "1m"
required: true
- variable: scrapeTimeout
label: "Scrape Timeout"
description: "Scrape timeout Time"
schema:
type: string
default: "30s"
required: true

View File

@ -0,0 +1,33 @@
- variable: metrics
group: "Metrics"
label: "Prometheus Metrics"
schema:
type: dict
attrs:
- variable: enabled
label: "Enabled"
description: "Enable Prometheus Metrics"
schema:
type: boolean
default: true
show_subquestions_if: true
subquestions:
- variable: serviceMonitor
label: "Service Monitor Settings"
schema:
type: dict
attrs:
- variable: interval
label: "Scrape Interval"
description: "Scrape interval time"
schema:
type: string
default: "3m"
required: true
- variable: scrapeTimeout
label: "Scrape Timeout"
description: "Scrape timeout Time"
schema:
type: string
default: "1m"
required: true

View File

@ -0,0 +1,33 @@
- variable: metrics
group: "Metrics"
label: "Prometheus Metrics"
schema:
type: dict
attrs:
- variable: enabled
label: "Enabled"
description: "Enable Prometheus Metrics"
schema:
type: boolean
default: true
show_subquestions_if: true
subquestions:
- variable: serviceMonitor
label: "Service Monitor Settings"
schema:
type: dict
attrs:
- variable: interval
label: "Scrape Interval"
description: "Scrape interval time"
schema:
type: string
default: "60m"
required: true
- variable: scrapeTimeout
label: "Scrape Timeout"
description: "Scrape timeout Time"
schema:
type: string
default: "1m"
required: true

View File

@ -0,0 +1,13 @@
- variable: prometheusRule
label: "PrometheusRule"
description: "Enable and configure Prometheus Rules for the App."
schema:
type: dict
attrs:
- variable: enabled
label: "Enabled"
description: "Enable Prometheus Metrics"
schema:
type: boolean
default: false
# TODO: Rule List section

View File

@ -215,6 +215,26 @@ include_questions(){
/# Include{addons}/ { for (i=0;i<n;++i) print a[i]; next }
1' templates/questions/addons.yaml ${target}/questions.yaml > tmp && mv tmp ${target}/questions.yaml
# Replace # Include{metrics} with the standard metrics codesnippet
awk 'NR==FNR { a[n++]=$0; next }
/# Include{metrics}/ { for (i=0;i<n;++i) print a[i]; next }
1' templates/questions/metrics.yaml ${target}/questions.yaml > tmp && mv tmp ${target}/questions.yaml
# Replace # Include{metrics3m} with the standard metrics3m codesnippet
awk 'NR==FNR { a[n++]=$0; next }
/# Include{metrics3m}/ { for (i=0;i<n;++i) print a[i]; next }
1' templates/questions/metrics3m.yaml ${target}/questions.yaml > tmp && mv tmp ${target}/questions.yaml
# Replace # Include{metrics60m} with the standard metrics60m codesnippet
awk 'NR==FNR { a[n++]=$0; next }
/# Include{metrics60m}/ { for (i=0;i<n;++i) print a[i]; next }
1' templates/questions/metrics60m.yaml ${target}/questions.yaml > tmp && mv tmp ${target}/questions.yaml
# Replace # Include{prometheusRule} with the standard prometheusRule codesnippet
awk 'NR==FNR { a[n++]=$0; next }
/# Include{prometheusRule}/ { for (i=0;i<n;++i) print a[i]; next }
1' templates/questions/prometheusRule.yaml ${target}/questions.yaml > tmp && mv tmp ${target}/questions.yaml
# Replace # Include{advanced} with the standard advanced codesnippet
awk 'NR==FNR { a[n++]=$0; next }
/# Include{advanced}/ { for (i=0;i<n;++i) print a[i]; next }