image:
  repository: docker.io/localai/localai
  pullPolicy: IfNotPresent
  tag: v2.20.1@sha256:d92d163a89e6e2b546befe72e71d73c13a2a2222451364922675618d805cf2f6
ffmpegImage:
  repository: docker.io/localai/localai
  pullPolicy: IfNotPresent
  tag: v2.20.1-ffmpeg-core@sha256:08492d52715d11d78c7cd88732b894e846dfb0586fed3715b20bd3555b9ad190
cublasCuda12Image:
  repository: docker.io/localai/localai
  pullPolicy: IfNotPresent
  tag: v2.20.1-cublas-cuda12-core@sha256:b6dffaeae8675679f591f6622d4879456b04825cccef23f8caa6527adfa82b30
cublasCuda12FfmpegImage:
  repository: docker.io/localai/localai
  pullPolicy: IfNotPresent
  tag: v2.20.1-cublas-cuda12-ffmpeg-core@sha256:4fffb50c80ef705a9ea00aec8cbf02235f3942d077bc58c2dc29b42e7ea02464
cublasCuda11Image:
  repository: docker.io/localai/localai
  pullPolicy: IfNotPresent
  tag: v2.20.1-cublas-cuda11-core@sha256:15c70733982565a5247287407c0d69ebc52efbb9bb60abb99407f19c0b6cfd0e
cublasCuda11FfmpegImage:
  repository: docker.io/localai/localai
  pullPolicy: IfNotPresent
  tag: v2.20.1-cublas-cuda11-ffmpeg-core@sha256:bd2cacfe5c6b1809b3ef6dd7a3a5ca7e455b75d6328f57d75e0a941363b59b53
allInOneCuda12Image:
  repository: docker.io/localai/localai
  pullPolicy: IfNotPresent
  tag: v2.20.1-aio-gpu-nvidia-cuda-12@sha256:58cd87cf70a69c8eea1c73ce346c6fcbecc2de4b3d0160c03adc24246cba4ab7
allInOneCuda11Image:
  repository: docker.io/localai/localai
  pullPolicy: IfNotPresent
  tag: v2.20.1-aio-gpu-nvidia-cuda-11@sha256:dc1d4c6e01653021dcb47e6dc035597bb4cc578b41f4d22bd6427a42fe4c1d4c
allInOneCpuImage:
  repository: docker.io/localai/localai
  pullPolicy: IfNotPresent
  tag: v2.20.1-aio-cpu@sha256:e1400072b5e3ec42fb504faafadcc4aa20a696c3f30b8edb9b923532942965b9
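
# The ffmpegImage, cublas*Image and allInOne*Image entries above are variant
# builds of the default `image`. Presumably (per the chart's common-library
# convention, not stated in this file) a variant is used by pointing
# workload.main.podSpec.containers.main.imageSelector below at the desired key,
# e.g. `imageSelector: cublasCuda12Image`, and for the cublas variants also
# setting localai.build_type to "cublas" to match.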

securityContext:
  container:
    runAsNonRoot: false
    readOnlyRootFilesystem: false
    runAsUser: 0
    runAsGroup: 0

service:
  main:
    ports:
      main:
        protocol: http
        port: 8080

localai:
  # Specify a build type. Available: cublas, openblas, clblas.
  build_type: "openblas"
  debug: false
  cors: true
  cors_allow_origins: "*"
  galleries: []
  # - name: model-gallery
  #   url: github:go-skynet/model-gallery/index.yaml
  preload_models: []
  # url: github:go-skynet/model-gallery/gpt4all-j.yaml
  # UPLOAD_LIMIT

workload:
  main:
    podSpec:
      containers:
        main:
          probes:
            liveness:
              enabled: true
              type: http
              path: /readyz
            readiness:
              enabled: true
              type: http
              path: /readyz
            startup:
              enabled: true
              type: tcp
          imageSelector: image
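          # The env values below reference other values in this file via Helm
          # templating: ADDRESS comes from service.main.ports.main.port,
          # MODELS_PATH and IMAGE_PATH from the persistence mountPaths, and the
          # remaining entries from the localai options above.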
          env:
            ADDRESS: ":{{ .Values.service.main.ports.main.port }}"
            MODELS_PATH: "{{ .Values.persistence.models.mountPath }}"
            IMAGE_PATH: "{{ .Values.persistence.images.mountPath }}"
            BUILD_TYPE: "{{ .Values.localai.build_type }}"
            # breaks chart if true, keep it false.
            REBUILD: false
            DEBUG: "{{ .Values.localai.debug }}"
            CORS: "{{ .Values.localai.cors }}"
            GALLERIES: "{{ toJson .Values.localai.galleries }}"
            PRELOAD_MODELS: "{{ toJson .Values.localai.preload_models }}"
            CORS_ALLOW_ORIGINS: "{{ .Values.localai.cors_allow_origins }}"

persistence:
  models:
    enabled: true
    mountPath: "/models"
  images:
    enabled: true
    mountPath: "/images"

portal:
  open:
    enabled: false

updated: true