catalog/stable/local-ai/10.19.1/ix_values.yaml
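
# Image variants for LocalAI v2.16.0, all pinned by digest: the base image, an
# ffmpeg build, cuBLAS builds for CUDA 11 and 12 (with and without ffmpeg), and
# the all-in-one (AIO) GPU/CPU images. The workload's imageSelector below
# determines which variant the container actually runs.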

image:
  repository: docker.io/localai/localai
  pullPolicy: IfNotPresent
  tag: v2.16.0@sha256:3473f9694c3899d9e8d3c3c887d52f4e7d46cf643a03d7903f108d097a70611f
ffmpegImage:
  repository: docker.io/localai/localai
  pullPolicy: IfNotPresent
  tag: v2.16.0-ffmpeg-core@sha256:0a3c62f2a28d7a3ca233ad6c77af750540c62ab8a0bb3d04f15943845a4c2b50
cublasCuda12Image:
  repository: docker.io/localai/localai
  pullPolicy: IfNotPresent
  tag: v2.16.0-cublas-cuda12-core@sha256:16753f0714a7b81530263a9990dce03767f22d0a3fe08961aca95c1d0ac77258
cublasCuda12FfmpegImage:
  repository: docker.io/localai/localai
  pullPolicy: IfNotPresent
  tag: v2.16.0-cublas-cuda12-ffmpeg-core@sha256:ccf9647b91f4c4e20cdffe1de27fc4c8fe587a4554663dedf469bd49dea7d3a5
cublasCuda11Image:
  repository: docker.io/localai/localai
  pullPolicy: IfNotPresent
  tag: v2.16.0-cublas-cuda11-core@sha256:7bcd70e4c7164815ac1bafaf69c8493514e13c4f84d611590cc4001fb44829d8
cublasCuda11FfmpegImage:
  repository: docker.io/localai/localai
  pullPolicy: IfNotPresent
  tag: v2.16.0-cublas-cuda11-ffmpeg-core@sha256:8e90cd63d3a904d980a2a3ba5c1a78379cf9f4d37afff02d1c00a7aec279c146
allInOneCuda12Image:
  repository: docker.io/localai/localai
  pullPolicy: IfNotPresent
  tag: v2.16.0-aio-gpu-nvidia-cuda-12@sha256:7c0f92e7a12593da8bb7dae41d44219913cab35ede90b340b0d4b1e1d5e0eab3
allInOneCuda11Image:
  repository: docker.io/localai/localai
  pullPolicy: IfNotPresent
  tag: v2.16.0-aio-gpu-nvidia-cuda-11@sha256:5c52b338d391c2683c63fd0e382fc6c22cc1f9305701f00aa8cb9fc4a10eb37e
allInOneCpuImage:
  repository: docker.io/localai/localai
  pullPolicy: IfNotPresent
  tag: v2.16.0-aio-cpu@sha256:e3c8e59b16e12f863a2590c15a21fbe1d45bda92c6a300604a833e9bc46b08ca
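# NOTE: the container runs as root (UID/GID 0) with a writable root filesystem;
# presumably this is needed so LocalAI can download models and prepare backends
# at runtime.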
securityContext:
  container:
    runAsNonRoot: false
    readOnlyRootFilesystem: false
    runAsUser: 0
    runAsGroup: 0
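# LocalAI serves its OpenAI-compatible HTTP API on port 8080 (its default).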
service:
  main:
    ports:
      main:
        protocol: http
        port: 8080
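# Application-level settings; each value below is passed to the container as an
# environment variable in the workload section further down.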
localai:
  # Specify a build type. Available: cublas, openblas, clblas.
  build_type: "openblas"
  debug: false
  cors: true
  cors_allow_origins: "*"
  galleries: []
  # - name: model-gallery
  #   url: github:go-skynet/model-gallery/index.yaml
  preload_models: []
  # - url: github:go-skynet/model-gallery/gpt4all-j.yaml
  # UPLOAD_LIMIT
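# Liveness and readiness probes hit LocalAI's /readyz endpoint over HTTP; the
# startup probe is a plain TCP connect check.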
workload:
  main:
    podSpec:
      containers:
        main:
          probes:
            liveness:
              enabled: true
              type: http
              path: /readyz
            readiness:
              enabled: true
              type: http
              path: /readyz
            startup:
              enabled: true
              type: tcp
          imageSelector: image
          env:
            ADDRESS: ":{{ .Values.service.main.ports.main.port }}"
            MODELS_PATH: "{{ .Values.persistence.models.mountPath }}"
            IMAGE_PATH: "{{ .Values.persistence.images.mountPath }}"
            BUILD_TYPE: "{{ .Values.localai.build_type }}"
            # Setting REBUILD to true breaks the chart; keep it false.
            REBUILD: false
            DEBUG: "{{ .Values.localai.debug }}"
            CORS: "{{ .Values.localai.cors }}"
            GALLERIES: "{{ toJson .Values.localai.galleries }}"
            PRELOAD_MODELS: "{{ toJson .Values.localai.preload_models }}"
            CORS_ALLOW_ORIGINS: "{{ .Values.localai.cors_allow_origins }}"
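# Persistent storage: downloaded models live under /models and generated images
# under /images; both paths feed the MODELS_PATH and IMAGE_PATH environment
# variables above.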
persistence:
  models:
    enabled: true
    mountPath: "/models"
  images:
    enabled: true
    mountPath: "/images"
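# The web portal button is disabled by default.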
portal:
  open:
    enabled: false
updated: true
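
# Example (sketch, not part of the shipped defaults): to run the CUDA 12 +
# ffmpeg variant instead of the base image, overrides would look roughly like
# the following. Normally the app's GUI questions set these values, so treat
# the exact override path as an assumption.
#
# localai:
#   build_type: "cublas"
# workload:
#   main:
#     podSpec:
#       containers:
#         main:
#           imageSelector: cublasCuda12FfmpegImage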