image:
  repository: ghcr.io/mintplex-labs/anything-llm
  pullPolicy: IfNotPresent
  tag: latest@sha256:a4157de59d7c1c6aec1dd4e8ceb8c82f5f720cce20ce6e7a8061ba0cce75df18

securityContext:
  container:
    readOnlyRootFilesystem: false
    runAsUser: 0
    runAsGroup: 0
    capabilities:
      add:
        - SYS_ADMIN

service:
  main:
    ports:
      main:
        protocol: http
        port: 3001

workload:
  main:
    podSpec:
      containers:
        main:
          env:
            SERVER_PORT: "{{ .Values.service.main.ports.main.port }}"
            STORAGE_DIR: "{{ .Values.persistence.storage.mountPath }}"
            # HTTPS is intentionally left disabled so that TLS is terminated by an
            # ingress in front of the service instead; keep this false (a commented
            # ingress sketch is at the end of this file).
            ENABLE_HTTPS: false

            JWT_SECRET:
              secretKeyRef:
                name: anythinglmm-secrets
                key: JWT_SECRET
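            # The secret referenced above is not created by this values file. One way
            # to provide it out-of-band (a sketch; assumes the chart does not template
            # the secret for you, and that you create it in the release's namespace):
            #   kubectl create secret generic anythinglmm-secrets \
            #     --from-literal=JWT_SECRET="$(openssl rand -hex 32)"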

            # --- LLM provider selection: uncomment one block ---
            # LLM_PROVIDER='openai'
            # OPEN_AI_KEY=
            # OPEN_MODEL_PREF='gpt-3.5-turbo'

            # LLM_PROVIDER='gemini'
            # GEMINI_API_KEY=
            # GEMINI_LLM_MODEL_PREF='gemini-pro'

            # LLM_PROVIDER='azure'
            # AZURE_OPENAI_KEY=
            # AZURE_OPENAI_ENDPOINT=
            # OPEN_MODEL_PREF='my-gpt35-deployment' # This is the "deployment" on Azure you want to use, not the base model.
            # EMBEDDING_MODEL_PREF='embedder-model' # This is the "deployment" on Azure you want to use for embeddings, not the base model. A valid base model is text-embedding-ada-002.

            # LLM_PROVIDER='anthropic'
            # ANTHROPIC_API_KEY=sk-ant-xxxx
            # ANTHROPIC_MODEL_PREF='claude-2'

            # LLM_PROVIDER='lmstudio'
            # LMSTUDIO_BASE_PATH='http://your-server:1234/v1'
            # LMSTUDIO_MODEL_TOKEN_LIMIT=4096

            # LLM_PROVIDER='localai'
            # LOCAL_AI_BASE_PATH='http://host.docker.internal:8080/v1'
            # LOCAL_AI_MODEL_PREF='luna-ai-llama2'
            # LOCAL_AI_MODEL_TOKEN_LIMIT=4096
            # LOCAL_AI_API_KEY="sk-123abc"

            # LLM_PROVIDER='ollama'
            # OLLAMA_BASE_PATH='http://host.docker.internal:11434'
            # OLLAMA_MODEL_PREF='llama2'
            # OLLAMA_MODEL_TOKEN_LIMIT=4096

            # LLM_PROVIDER='togetherai'
            # TOGETHER_AI_API_KEY='my-together-ai-key'
            # TOGETHER_AI_MODEL_PREF='mistralai/Mixtral-8x7B-Instruct-v0.1'

            # LLM_PROVIDER='mistral'
            # MISTRAL_API_KEY='example-mistral-ai-api-key'
            # MISTRAL_MODEL_PREF='mistral-tiny'

            # LLM_PROVIDER='huggingface'
            # HUGGING_FACE_LLM_ENDPOINT=https://uuid-here.us-east-1.aws.endpoints.huggingface.cloud
            # HUGGING_FACE_LLM_API_KEY=hf_xxxxxx
            # HUGGING_FACE_LLM_TOKEN_LIMIT=8000

            # --- Embedding engine selection: uncomment one block ---
            # EMBEDDING_ENGINE='openai'
            # OPEN_AI_KEY=sk-xxxx
            # EMBEDDING_MODEL_PREF='text-embedding-ada-002'

            # EMBEDDING_ENGINE='azure'
            # AZURE_OPENAI_ENDPOINT=
            # AZURE_OPENAI_KEY=
            # EMBEDDING_MODEL_PREF='my-embedder-model' # This is the "deployment" on Azure you want to use for embeddings, not the base model. A valid base model is text-embedding-ada-002.

            # EMBEDDING_ENGINE='localai'
            # EMBEDDING_BASE_PATH='http://localhost:8080/v1'
            # EMBEDDING_MODEL_PREF='text-embedding-ada-002'
            # EMBEDDING_MODEL_MAX_CHUNK_LENGTH=1000 # The maximum chunk size, in characters, of a string to embed.

            # --- Vector database selection: uncomment one block ---
            # Enable all of the lines below if you are using the Chroma vector database.
            # VECTOR_DB="chroma"
            # CHROMA_ENDPOINT='http://host.docker.internal:8000'
            # CHROMA_API_HEADER="X-Api-Key"
            # CHROMA_API_KEY="sk-123abc"

            # VECTOR_DB="pinecone"
            # PINECONE_API_KEY=
            # PINECONE_INDEX=

            # VECTOR_DB="lancedb"

            # VECTOR_DB="weaviate"
            # WEAVIATE_ENDPOINT="http://localhost:8080"
            # WEAVIATE_API_KEY=

            # VECTOR_DB="qdrant"
            # QDRANT_ENDPOINT="http://localhost:6333"
            # QDRANT_API_KEY=

            # VECTOR_DB="milvus"
            # MILVUS_ADDRESS="http://localhost:19530"
            # MILVUS_USERNAME=
            # MILVUS_PASSWORD=

            # VECTOR_DB="zilliz"
            # ZILLIZ_ENDPOINT="https://sample.api.gcp-us-west1.zillizcloud.com"
            # ZILLIZ_API_TOKEN=api-token-here

            # VECTOR_DB="astra"
            # ASTRA_DB_APPLICATION_TOKEN=
            # ASTRA_DB_ENDPOINT=

            # --- Miscellaneous ---
            # AUTH_TOKEN="hunter2" # The password for the application when it is hosted remotely.
            # DISABLE_TELEMETRY="false"

            # Password complexity rules, validated with
            # https://github.com/kamronbatman/joi-password-complexity (the upstream
            # default only enforces an 8-character minimum). The per-class values are
            # minimum character counts, and PASSWORDREQUIREMENTS is the number of
            # those class rules that must be satisfied.
            PASSWORDMINCHAR: 8
            PASSWORDMAXCHAR: 250
            PASSWORDLOWERCASE: 1
            PASSWORDUPPERCASE: 1
            PASSWORDNUMERIC: 1
            PASSWORDSYMBOL: 1
            PASSWORDREQUIREMENTS: 4

persistence:
  storage:
    enabled: true
    mountPath: "/app/server/storage"
  hotdir:
    enabled: true
    mountPath: "/app/collector/hotdir"
  outputs:
    enabled: true
    mountPath: "/app/collector/outputs"
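  # Size, access mode, and storage class for these mounts come from the chart's
  # defaults. A sketch of overriding them for one entry (key names are illustrative
  # and depend on the chart's persistence library; verify against the chart docs):
  #   storage:
  #     type: pvc
  #     size: 10Gi
  #     storageClass: your-storage-class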

portal:
  open:
    enabled: true
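
# HTTPS should be terminated by an ingress in front of the service (see the
# ENABLE_HTTPS note above). A minimal sketch, assuming the chart exposes a
# common-library style ingress block; the key names and hostname are illustrative,
# not part of this file's defaults:
# ingress:
#   main:
#     enabled: true
#     hosts:
#       - host: anythingllm.example.com
#         paths:
#           - path: /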