apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ template "common.names.fullname" . }}-config
  labels:
    {{- include "common.labels" . | nindent 4 }}
data:
  config.yml: |
    mqtt:
      # Required: host name
      host: {{ .Values.frigate.mqtt.host }}
      # Optional: port (default: shown below)
      port: {{ .Values.frigate.mqtt.port }}
      # Optional: topic prefix (default: shown below)
      # NOTE: must be unique if you are running multiple instances
      topic_prefix: frigate
      # Optional: client id (default: shown below)
      # NOTE: must be unique if you are running multiple instances
      client_id: frigate
      {{- if .Values.frigate.mqtt.authenticated }}
      user: {{ .Values.frigate.mqtt.username }}
      password: {{ .Values.frigate.mqtt.password }}
      # Optional: user
      # user: mqtt_user
      # Optional: password
      # NOTE: the MQTT password can be specified with an environment variable that must begin with 'FRIGATE_',
      #       e.g. password: '{FRIGATE_MQTT_PASSWORD}'
      # password: password
      {{- end }}
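      # NOTE (illustrative): the templated values above are assumed to come from the chart's
      # values.yaml under 'frigate.mqtt'; a minimal sketch of such an entry, with hypothetical
      # values, might look like:
      #   frigate:
      #     mqtt:
      #       host: mqtt.example.local
      #       port: 1883
      #       authenticated: true
      #       username: frigate
      #       password: change-me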
      # Optional: tls_ca_certs for enabling TLS using self-signed certs (default: None)
      # tls_ca_certs: /path/to/ca.crt
      # Optional: tls_client_cert and tls_client_key in order to use self-signed client
      # certificates (default: None)
      # NOTE: certificate must not be password-protected
      #       do not set user and password when using a client certificate
      # tls_client_cert: /path/to/client.crt
      # tls_client_key: /path/to/client.key
      # Optional: tls_insecure (true/false) for enabling TLS verification of
      # the server hostname in the server certificate (default: None)
      # tls_insecure: false
      # Optional: interval in seconds for publishing stats (default: shown below)
      stats_interval: 60

    # Optional: Detectors configuration. Defaults to a single CPU detector
    detectors:
      # Required: name of the detector
      {{- if .Values.persistence.coral.enabled }}
      coral:
        # Required: type of the detector
        # Valid values are 'edgetpu' (requires device property below) and 'cpu'.
        type: edgetpu
        # Optional: device name as defined here: https://coral.ai/docs/edgetpu/multiple-edgetpu/#using-the-tensorflow-lite-python-api
        device: usb
      {{- else }}
      cpu1:
        type: cpu
        # Optional: num_threads value passed to the tflite.Interpreter (default: shown below)
        # This value is only used for CPU types
        num_threads: 2
      {{- end }}
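      # NOTE (illustrative): which detector block renders is driven by
      # '.Values.persistence.coral.enabled'; a minimal sketch of the assumed values.yaml toggle:
      #   persistence:
      #     coral:
      #       enabled: true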

    # Optional: Database configuration
    # database:
      # The path to store the SQLite DB (default: shown below)
      # path: /media/frigate/frigate.db

    # Optional: model modifications
    # model:
      # Optional: path to the model (default: automatic based on detector)
      # path: /edgetpu_model.tflite
      # Optional: path to the labelmap (default: shown below)
      # labelmap_path: /labelmap.txt
      # Required: Object detection model input width (default: shown below)
      # width: 320
      # Required: Object detection model input height (default: shown below)
      # height: 240
      # Optional: Label name modifications. These are merged into the standard labelmap.
      # labelmap:
        # 2: vehicle

    # Optional: logger verbosity settings
    logger:
      # Optional: Default log verbosity (default: shown below)
      default: info
      # Optional: Component specific logger overrides
      # logs:
        # frigate.event: info

    # Optional: set environment variables
    # environment_vars:
      # EXAMPLE_VAR: value

    # Optional: birdseye configuration
    birdseye:
      # Optional: Enable birdseye view (default: shown below)
      enabled: {{ .Values.frigate.birdseye.enabled }}
      # Optional: Width of the output resolution (default: shown below)
      width: 1280
      # Optional: Height of the output resolution (default: shown below)
      height: 720
      # Optional: Encoding quality of the mpeg1 feed (default: shown below)
      # 1 is the highest quality, and 31 is the lowest. Lower quality feeds utilize less CPU resources.
      quality: 8
      # Optional: Mode of the view. Available options are: objects, motion, and continuous
      #   objects - cameras are included if they have had a tracked object within the last 30 seconds
      #   motion - cameras are included if motion was detected in the last 30 seconds
      #   continuous - all cameras are included always
      mode: {{ .Values.frigate.birdseye.mode }}

    # Optional: ffmpeg configuration
    ffmpeg:
      # Optional: global ffmpeg args (default: -hide_banner -loglevel warning)
      # NOTE: the log level is set to debug here instead of the default warning for more verbose logging
      global_args: -hide_banner -loglevel debug
      # Optional: global hwaccel args (default: shown below)
      # NOTE: See hardware acceleration docs for your specific device
      # hwaccel_args: []
      # Optional: global input args (default: shown below)
      # input_args: -avoid_negative_ts make_zero -fflags +genpts+discardcorrupt -rtsp_transport tcp -stimeout 5000000 -use_wallclock_as_timestamps 1
      # Optional: global output args
      # output_args:
        # Optional: output args for detect streams (default: shown below)
        # detect: -f rawvideo -pix_fmt yuv420p
        # Optional: output args for record streams (default: shown below)
        # record: -f segment -segment_time 10 -segment_format mp4 -reset_timestamps 1 -strftime 1 -c copy -an
        # Optional: output args for rtmp streams (default: shown below)
        # rtmp: -c copy -f flv

    # Optional: Detect configuration
    # NOTE: Can be overridden at the camera level
    detect:
      # Optional: width of the frame for the input with the detect role (default: shown below)
      width: 1280
      # Optional: height of the frame for the input with the detect role (default: shown below)
      height: 720
      # Optional: desired fps for your camera for the input with the detect role (default: shown below)
      # NOTE: Recommended value of 5. Ideally, try and reduce your FPS on the camera.
      fps: 5
      # Optional: enables detection for the camera (default: True)
      # This value can be set via MQTT and will be updated at startup based on the retained value
      enabled: True
      # Optional: Number of frames without a detection before frigate considers an object to be gone. (default: 5x the frame rate)
      # max_disappeared: 25
      # Optional: Frequency for running detection on stationary objects (default: 10x the frame rate)
      # stationary_interval: 50

    # Optional: Object configuration
    # NOTE: Can be overridden at the camera level
    objects:
      # Optional: list of objects to track from labelmap.txt (default: shown below)
      track:
        - person
        - car
        - motorcycle
        - cat
        - dog
      # Optional: mask to prevent all object types from being detected in certain areas (default: no mask)
      # Checks based on the bottom center of the bounding box of the object.
      # NOTE: This mask is COMBINED with the object type specific mask below
      # mask: 0,0,1000,0,1000,200,0,200
      # Optional: filters to reduce false positives for specific object types
      filters:
        person:
          # Optional: minimum width*height of the bounding box for the detected object (default: 0)
          min_area: 5000
          # Optional: maximum width*height of the bounding box for the detected object (default: 24000000)
          max_area: 100000
          # Optional: minimum score for the object to initiate tracking (default: shown below)
          # min_score: 0.5
          # Optional: minimum decimal percentage for tracked object's computed score to be considered a true positive (default: shown below)
          # threshold: 0.7
          # Optional: mask to prevent this object type from being detected in certain areas (default: no mask)
          # Checks based on the bottom center of the bounding box of the object
          # mask: 0,0,1000,0,1000,200,0,200

    # Optional: Motion configuration
    # NOTE: Can be overridden at the camera level
    motion:
      # Optional: The threshold passed to cv2.threshold to determine if a pixel is different enough to be counted as motion. (default: shown below)
      # Increasing this value will make motion detection less sensitive and decreasing it will make motion detection more sensitive.
      # The value should be between 1 and 255.
      # threshold: 25
      # Optional: Minimum size in pixels in the resized motion image that counts as motion (default: 30)
      # Increasing this value will prevent smaller areas of motion from being detected. Decreasing it will
      # make motion detection more sensitive to smaller moving objects.
      # As a rule of thumb:
      #   - 15 - high sensitivity
      #   - 30 - medium sensitivity
      #   - 50 - low sensitivity
      contour_area: 30
      # Optional: Alpha value passed to cv2.accumulateWeighted when averaging the motion delta across multiple frames (default: shown below)
      # Higher values mean the current frame impacts the delta a lot, and a single raindrop may register as motion.
      # Too low and a fast-moving person won't be detected as motion.
      # delta_alpha: 0.2
      # Optional: Alpha value passed to cv2.accumulateWeighted when averaging frames to determine the background (default: shown below)
      # Higher values mean the current frame impacts the average a lot, and a new object will be averaged into the background faster.
      # Low values will cause things like moving shadows to be detected as motion for longer.
      # https://www.geeksforgeeks.org/background-subtraction-in-an-image-using-concept-of-running-average/
      # frame_alpha: 0.2
      # Optional: Height of the resized motion frame (default: 50)
      # This operates as an efficient blur alternative. Higher values will result in more granular motion detection at the expense
      # of higher CPU usage. Lower values result in less CPU, but small changes may not register as motion.
      # frame_height: 50
      # Optional: motion mask
      # NOTE: see docs for more detailed info on creating masks
      # mask: 0,900,1080,900,1080,1920,0,1920

    # Optional: Record configuration
    # NOTE: Can be overridden at the camera level
    record:
      # Optional: Enable recording (default: shown below)
      enabled: False
      # Optional: Retention settings for recording
      retain:
        # Optional: Number of days to retain recordings regardless of events (default: shown below)
        # NOTE: This should be set to 0 and retention should be defined in the events section below
        # if you only want to retain recordings of events.
        days: 7
        # Optional: Mode for retention. Available options are: all, motion, and active_objects
        #   all - save all recording segments regardless of activity
        #   motion - save all recording segments with any detected motion
        #   active_objects - save all recording segments with active/moving objects
        # NOTE: this mode only applies when the days setting above is greater than 0
        mode: motion
      # Optional: Event recording settings
      events:
        # Optional: Maximum length of time to retain video during long events. (default: shown below)
        # NOTE: If an object is being tracked for longer than this amount of time, the retained recordings
        # will be the last x seconds of the event unless retain->days under record is > 0.
        max_seconds: 300
        # Optional: Number of seconds before the event to include (default: shown below)
        pre_capture: 5
        # Optional: Number of seconds after the event to include (default: shown below)
        post_capture: 5
        # Optional: Objects to save recordings for. (default: all tracked objects)
        # objects:
          # - person
        # Optional: Restrict recordings to objects that entered any of the listed zones (default: no required zones)
        # required_zones: []
        # Optional: Retention settings for recordings of events
        retain:
          # Required: Default retention days (default: shown below)
          default: 14
          # Optional: Mode for retention. (default: shown below)
          #   all - save all recording segments for events regardless of activity
          #   motion - save all recording segments for events with any detected motion
          #   active_objects - save all recording segments for events with active/moving objects
          #
          # NOTE: If the retain mode for the camera is more restrictive than the mode configured
          # here, the segments will already be gone by the time this mode is applied.
          # For example, if the camera retain mode is "motion", the segments without motion are
          # never stored, so setting the mode to "all" here won't bring them back.
          mode: active_objects
          # Optional: Per object retention days
          # objects:
            # person: 15

    # Optional: Configuration for the jpg snapshots written to the clips directory for each event
    # NOTE: Can be overridden at the camera level
    snapshots:
      # Optional: Enable writing jpg snapshot to /media/frigate/clips (default: shown below)
      # This value can be set via MQTT and will be updated at startup based on the retained value
      enabled: False
      # Optional: print a timestamp on the snapshots (default: shown below)
      # timestamp: False
      # Optional: draw bounding box on the snapshots (default: shown below)
      # bounding_box: False
      # Optional: crop the snapshot (default: shown below)
      # crop: False
      # Optional: height to resize the snapshot to (default: original size)
      # height: 175
      # Optional: Restrict snapshots to objects that entered any of the listed zones (default: no required zones)
      required_zones: []
      # Optional: Camera override for retention settings (default: global values)
      retain:
        # Required: Default retention days (default: shown below)
        default: 10
        # Optional: Per object retention days
        objects:
          person: 15

    # Optional: RTMP configuration
    # NOTE: Can be overridden at the camera level
    rtmp:
      # Optional: Enable the RTMP stream (default: True)
      enabled: True

    # Optional: Live stream configuration for WebUI
    # NOTE: Can be overridden at the camera level
    live:
      # Optional: Set the height of the live stream. (default: 720)
      # This must be less than or equal to the height of the detect stream. Lower resolutions
      # reduce bandwidth required for viewing the live stream. Width is computed to match known aspect ratio.
      # height: 720
      # Optional: Set the encode quality of the live stream (default: shown below)
      # 1 is the highest quality, and 31 is the lowest. Lower quality feeds utilize less CPU resources.
      quality: 8

    # Optional: in-feed timestamp style configuration
    # NOTE: Can be overridden at the camera level
    # timestamp_style:
      # Optional: Position of the timestamp (default: shown below)
      #   "tl" (top left), "tr" (top right), "bl" (bottom left), "br" (bottom right)
      # position: "tl"
      # Optional: Format specifier conforming to the Python package "datetime" (default: shown below)
      # Additional Examples:
      #   german: "%d.%m.%Y %H:%M:%S"
      # format: "%m/%d/%Y %H:%M:%S"
      # Optional: Color of font
      # color:
        # All required when color is specified (default: shown below)
        # red: 255
        # green: 255
        # blue: 255
      # Optional: Line thickness of font (default: shown below)
      # thickness: 2
      # Optional: Effect of lettering (default: shown below)
      #   None (No effect),
      #   "solid" (solid background in inverse color of font)
      #   "shadow" (shadow for font)
      # effect: solid

    # Required
    cameras:
      {{- range .Values.frigate.cameras }}
      # Required: name of the camera
      {{ .name }}:
        objects:
          # Optional: list of objects to track from labelmap.txt (default: shown below)
          track:
            {{- range .objects.track }}
            - {{ . }}
            {{- end }}
        detect:
          width: {{ .detect.width }}
          height: {{ .detect.height }}
        # Required: ffmpeg settings for the camera
        ffmpeg:
          # Required: A list of input streams for the camera. See documentation for more information.
          inputs:
            {{- range .inputs }}
            - path: {{ .path }}
              roles:
                {{- range .roles }}
                - {{ . }}
                {{- end }}
            {{- end }}
        record:
          enabled: {{ .record.enabled }}
        # Optional: timeout for highest scoring image before allowing it
        # to be replaced by a newer image. (default: shown below)
        best_image_timeout: 60
        snapshots:
          enabled: {{ .snapshots.enabled }}

        # Optional: zones for this camera
        # zones:
          # Required: name of the zone
          # NOTE: This must be different than any camera names, but can match with another zone on another
          # camera.
          # front_steps:
            # Required: List of x,y coordinates to define the polygon of the zone.
            # NOTE: Coordinates can be generated at https://www.image-map.net/
            # coordinates: 545,1077,747,939,788,805
            # Optional: List of objects that can trigger this zone (default: all tracked objects)
            # objects:
              # - person
            # Optional: Zone level object filters.
            # NOTE: The global and camera filters are applied upstream.
            # filters:
              # person:
                # min_area: 5000
                # max_area: 100000
                # threshold: 0.7

        # Optional: Configuration for the jpg snapshots published via MQTT
        # mqtt:
          # Optional: Enable publishing snapshot via mqtt for camera (default: shown below)
          # NOTE: Only applies to publishing image data to MQTT via 'frigate/<camera_name>/<object_name>/snapshot'.
          # All other messages will still be published.
          # enabled: True
          # Optional: print a timestamp on the snapshots (default: shown below)
          # timestamp: True
          # Optional: draw bounding box on the snapshots (default: shown below)
          # bounding_box: True
          # Optional: crop the snapshot (default: shown below)
          # crop: True
          # Optional: height to resize the snapshot to (default: shown below)
          # height: 270
          # Optional: jpeg encode quality (default: shown below)
          # quality: 70
          # Optional: Restrict mqtt messages to objects that entered any of the listed zones (default: no required zones)
          # required_zones: []
      {{- end }}
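    # NOTE (illustrative): each entry of '.Values.frigate.cameras' is assumed to carry the fields
    # referenced above (name, objects.track, detect, inputs with path/roles, record, snapshots);
    # a minimal sketch of one such values.yaml entry, with hypothetical values:
    #   frigate:
    #     cameras:
    #       - name: front_door
    #         objects:
    #           track:
    #             - person
    #         detect:
    #           width: 1280
    #           height: 720
    #         inputs:
    #           - path: rtsp://user:pass@192.0.2.10:554/stream
    #             roles:
    #               - detect
    #               - rtmp
    #         record:
    #           enabled: false
    #         snapshots:
    #           enabled: true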