image:
  repository: docker.io/localai/localai
  pullPolicy: IfNotPresent
  tag: v2.19.4@sha256:53b211a8f1b63f6c206395424fae4872144f924435cfa9f89fdd88610eaf29bb
ffmpegImage:
  repository: docker.io/localai/localai
  pullPolicy: IfNotPresent
  tag: v2.19.4-ffmpeg-core@sha256:b469241f05351591632c5edc191e3d7aae6dc753bd47a0289184ea1855cda8d7
cublasCuda12Image:
  repository: docker.io/localai/localai
  pullPolicy: IfNotPresent
  tag: v2.19.4-cublas-cuda12-core@sha256:a541199cd2fbd5d651bc977522229e07bb9a48e3eded1a8d4d2a6d0a3dbb65d0
cublasCuda12FfmpegImage:
  repository: docker.io/localai/localai
  pullPolicy: IfNotPresent
  tag: v2.19.4-cublas-cuda12-ffmpeg-core@sha256:c08cf2b08364c15894b04b0ca483781ced8e18c7a09e29c2a94b848b44c57e60
cublasCuda11Image:
  repository: docker.io/localai/localai
  pullPolicy: IfNotPresent
  tag: v2.19.4-cublas-cuda11-core@sha256:9dcba1d2782ca64f25c003192f9adf869ac588ef9f876259fc4ce3333ef59bee
cublasCuda11FfmpegImage:
  repository: docker.io/localai/localai
  pullPolicy: IfNotPresent
  tag: v2.19.4-cublas-cuda11-ffmpeg-core@sha256:42eac960d04b79b4e9209ba95a9efc044eba2ab76e496b6a4303a08a88ea7ee1
allInOneCuda12Image:
  repository: docker.io/localai/localai
  pullPolicy: IfNotPresent
  tag: v2.19.4-aio-gpu-nvidia-cuda-12@sha256:1839882834904decac4cba73e7641dd0849d348fee3318ad3d3ff29ba478b977
allInOneCuda11Image:
  repository: docker.io/localai/localai
  pullPolicy: IfNotPresent
  tag: v2.19.4-aio-gpu-nvidia-cuda-11@sha256:75504a6b5825675dabb948794a6015a2d37c1eec73a54c10ef439c87d0857c98
allInOneCpuImage:
  repository: docker.io/localai/localai
  pullPolicy: IfNotPresent
  tag: v2.19.4-aio-cpu@sha256:29f0d3bbb1803b79f64343137d8e00cec09192d0f9e73dd6169c407bf84afdb7
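# Note (editor's sketch, not from the chart docs): only one of the image blocks
# above is pulled at runtime; which one appears to be chosen by
# workload.main.podSpec.containers.main.imageSelector further down (set to
# "image" by default). To run, say, the CUDA 12 + ffmpeg build, one would
# presumably point imageSelector at cublasCuda12FfmpegImage, e.g.:
#
#   workload:
#     main:
#       podSpec:
#         containers:
#           main:
#             imageSelector: cublasCuda12FfmpegImage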
securityContext:
  container:
    runAsNonRoot: false
    readOnlyRootFilesystem: false
    runAsUser: 0
    runAsGroup: 0
service:
  main:
    ports:
      main:
        protocol: http
        port: 8080
localai:
  # Specify a build type. Available: cublas, openblas, clblas.
  build_type: "openblas"
  debug: false
  cors: true
  cors_allow_origins: "*"
  galleries: []
  # - name: model-gallery
  #   url: github:go-skynet/model-gallery/index.yaml
  preload_models: []
  # - url: github:go-skynet/model-gallery/gpt4all-j.yaml
  # UPLOAD_LIMIT
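# Sketch (editor's assumption, not stated in the chart): build_type above feeds
# the BUILD_TYPE environment variable in the workload section below, so it would
# presumably be kept in sync with the selected image variant, e.g.
#
#   build_type: "cublas"   # when using one of the cublasCuda*Image blocks
#
# while the default "openblas" matches the plain CPU images.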
workload:
  main:
    podSpec:
      containers:
        main:
          probes:
            liveness:
              enabled: true
              type: http
              path: /readyz
            readiness:
              enabled: true
              type: http
              path: /readyz
            startup:
              enabled: true
              type: tcp
          imageSelector: image
          env:
            ADDRESS: ":{{ .Values.service.main.ports.main.port }}"
            MODELS_PATH: "{{ .Values.persistence.models.mountPath }}"
            IMAGE_PATH: "{{ .Values.persistence.images.mountPath }}"
            BUILD_TYPE: "{{ .Values.localai.build_type }}"
            # Breaks the chart if set to true; keep it false.
            REBUILD: false
            DEBUG: "{{ .Values.localai.debug }}"
            CORS: "{{ .Values.localai.cors }}"
            GALLERIES: "{{ toJson .Values.localai.galleries }}"
            PRELOAD_MODELS: "{{ toJson .Values.localai.preload_models }}"
            CORS_ALLOW_ORIGINS: "{{ .Values.localai.cors_allow_origins }}"
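# Worked example (editor's assumption about how the templating above renders):
# if the commented gallery entry from the localai section were enabled, toJson
# would serialize .Values.localai.galleries to a JSON string, so the container
# would receive roughly:
#
#   GALLERIES: '[{"name":"model-gallery","url":"github:go-skynet/model-gallery/index.yaml"}]'
#
# With the default empty lists, GALLERIES and PRELOAD_MODELS render as "[]".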
persistence:
  models:
    enabled: true
    mountPath: "/models"
  images:
    enabled: true
    mountPath: "/images"
portal:
  open:
    enabled: false

updated: true