diff --git a/incubator/anything-llm/0.0.2/ix_values.yaml b/incubator/anything-llm/0.0.2/ix_values.yaml
deleted file mode 100644
index 5f6f7463a0f..00000000000
--- a/incubator/anything-llm/0.0.2/ix_values.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
-image:
-  repository: ghcr.io/mintplex-labs/anything-llm
-  pullPolicy: IfNotPresent
-  tag: latest@sha256:398218457ffe5e3be2d686f99ed759a9683075030f72832a89c4cc4429c481ac
-securityContext:
-  container:
-    readOnlyRootFilesystem: false
-    runAsUser: 0
-    runAsGroup: 0
-    capabilities:
-      add:
-        - SYS_ADMIN
-
-service:
-  main:
-    ports:
-      main:
-        protocol: http
-        port: 3001
-
-workload:
-  main:
-    podSpec:
-      containers:
-        main:
-          env:
-            SERVER_PORT: "{{ .Values.service.main.ports.main.port }}"
-            STORAGE_DIR: "{{.Values.persistence.storage.mountPath }}"
-            # forces users to use ingress if https is needed.
-            # keep false.
-            ENABLE_HTTPS: false
-
-persistence:
-  storage:
-    enabled: true
-    mountPath: "/app/server/storage"
-  hotdir:
-    enabled: true
-    mountPath: "/app/collector/hotdir"
-  outputs:
-    enabled: true
-    mountPath: "/app/collector/outputs"
-
-portal:
-  open:
-    enabled: true
diff --git a/incubator/anything-llm/0.0.2/templates/common.yaml b/incubator/anything-llm/0.0.2/templates/common.yaml
deleted file mode 100644
index b51394e00a4..00000000000
--- a/incubator/anything-llm/0.0.2/templates/common.yaml
+++ /dev/null
@@ -1 +0,0 @@
-{{ include "tc.v1.common.loader.all" . }}
diff --git a/incubator/anything-llm/0.0.2/CHANGELOG.md b/incubator/anything-llm/0.0.3/CHANGELOG.md
similarity index 65%
rename from incubator/anything-llm/0.0.2/CHANGELOG.md
rename to incubator/anything-llm/0.0.3/CHANGELOG.md
index 68efccf44ad..f4a5d16356a 100644
--- a/incubator/anything-llm/0.0.2/CHANGELOG.md
+++ b/incubator/anything-llm/0.0.3/CHANGELOG.md
@@ -8,4 +8,5 @@ title: Changelog
 
 
+
 
 ## [anything-llm-0.0.1]anything-llm-0.0.1 (2024-01-21)
\ No newline at end of file
diff --git a/incubator/anything-llm/0.0.2/Chart.yaml b/incubator/anything-llm/0.0.3/Chart.yaml
similarity index 98%
rename from incubator/anything-llm/0.0.2/Chart.yaml
rename to incubator/anything-llm/0.0.3/Chart.yaml
index 998180ad742..fe5911a9d67 100644
--- a/incubator/anything-llm/0.0.2/Chart.yaml
+++ b/incubator/anything-llm/0.0.3/Chart.yaml
@@ -34,4 +34,4 @@ sources:
   - https://github.com/truecharts/charts/tree/master/charts/incubator/anything-llm
   - https://ghcr.io/mintplex-labs/anything-llm
 type: application
-version: 0.0.2
+version: 0.0.3
diff --git a/incubator/anything-llm/0.0.2/README.md b/incubator/anything-llm/0.0.3/README.md
similarity index 100%
rename from incubator/anything-llm/0.0.2/README.md
rename to incubator/anything-llm/0.0.3/README.md
diff --git a/incubator/anything-llm/0.0.2/app-readme.md b/incubator/anything-llm/0.0.3/app-readme.md
similarity index 100%
rename from incubator/anything-llm/0.0.2/app-readme.md
rename to incubator/anything-llm/0.0.3/app-readme.md
diff --git a/incubator/anything-llm/0.0.2/charts/common-17.2.31.tgz b/incubator/anything-llm/0.0.3/charts/common-17.2.31.tgz
similarity index 100%
rename from incubator/anything-llm/0.0.2/charts/common-17.2.31.tgz
rename to incubator/anything-llm/0.0.3/charts/common-17.2.31.tgz
diff --git a/incubator/anything-llm/0.0.3/ix_values.yaml b/incubator/anything-llm/0.0.3/ix_values.yaml
new file mode 100644
index 00000000000..c6819d1cd81
--- /dev/null
+++ b/incubator/anything-llm/0.0.3/ix_values.yaml
@@ -0,0 +1,155 @@
+image:
+  repository: ghcr.io/mintplex-labs/anything-llm
+  pullPolicy: IfNotPresent
+  tag: latest@sha256:6313fe9fa6c65e294da8a8b3066fdb8397d42df73b43b1800a289cd87f1a68cb
+securityContext:
+  container:
+    readOnlyRootFilesystem: false
+    runAsUser: 0
+    runAsGroup: 0
+    capabilities:
+      add:
+        - SYS_ADMIN
+
+service:
+  main:
+    ports:
+      main:
+        protocol: http
+        port: 3001
+
+workload:
+  main:
+    podSpec:
+      containers:
+        main:
+          env:
+            SERVER_PORT: "{{ .Values.service.main.ports.main.port }}"
+            STORAGE_DIR: "{{.Values.persistence.storage.mountPath }}"
+            # forces users to use ingress if https is needed.
+            # keep false.
+            ENABLE_HTTPS: false
+            JWT_SECRET:
+              secretKeyRef:
+                name: anythinglmm-secrets
+                key: JWT_SECRET
+            # LLM_PROVIDER='openai'
+            # OPEN_AI_KEY=
+            # OPEN_MODEL_PREF='gpt-3.5-turbo'
+
+            # LLM_PROVIDER='gemini'
+            # GEMINI_API_KEY=
+            # GEMINI_LLM_MODEL_PREF='gemini-pro'
+
+            # LLM_PROVIDER='azure'
+            # AZURE_OPENAI_KEY=
+            # AZURE_OPENAI_ENDPOINT=
+            # OPEN_MODEL_PREF='my-gpt35-deployment' # This is the "deployment" on Azure you want to use. Not the base model.
+            # EMBEDDING_MODEL_PREF='embedder-model' # This is the "deployment" on Azure you want to use for embeddings. Not the base model. Valid base model is text-embedding-ada-002
+
+            # LLM_PROVIDER='anthropic'
+            # ANTHROPIC_API_KEY=sk-ant-xxxx
+            # ANTHROPIC_MODEL_PREF='claude-2'
+
+            # LLM_PROVIDER='lmstudio'
+            # LMSTUDIO_BASE_PATH='http://your-server:1234/v1'
+            # LMSTUDIO_MODEL_TOKEN_LIMIT=4096
+
+            # LLM_PROVIDER='localai'
+            # LOCAL_AI_BASE_PATH='http://host.docker.internal:8080/v1'
+            # LOCAL_AI_MODEL_PREF='luna-ai-llama2'
+            # LOCAL_AI_MODEL_TOKEN_LIMIT=4096
+            # LOCAL_AI_API_KEY="sk-123abc"
+
+            # LLM_PROVIDER='ollama'
+            # OLLAMA_BASE_PATH='http://host.docker.internal:11434'
+            # OLLAMA_MODEL_PREF='llama2'
+            # OLLAMA_MODEL_TOKEN_LIMIT=4096
+
+            # LLM_PROVIDER='togetherai'
+            # TOGETHER_AI_API_KEY='my-together-ai-key'
+            # TOGETHER_AI_MODEL_PREF='mistralai/Mixtral-8x7B-Instruct-v0.1'
+
+            # LLM_PROVIDER='mistral'
+            # MISTRAL_API_KEY='example-mistral-ai-api-key'
+            # MISTRAL_MODEL_PREF='mistral-tiny'
+
+            # LLM_PROVIDER='huggingface'
+            # HUGGING_FACE_LLM_ENDPOINT=https://uuid-here.us-east-1.aws.endpoints.huggingface.cloud
+            # HUGGING_FACE_LLM_API_KEY=hf_xxxxxx
+            # HUGGING_FACE_LLM_TOKEN_LIMIT=8000
+
+            # EMBEDDING_ENGINE='openai'
+            # OPEN_AI_KEY=sk-xxxx
+            # EMBEDDING_MODEL_PREF='text-embedding-ada-002'
+
+            # EMBEDDING_ENGINE='azure'
+            # AZURE_OPENAI_ENDPOINT=
+            # AZURE_OPENAI_KEY=
+            # EMBEDDING_MODEL_PREF='my-embedder-model' # This is the "deployment" on Azure you want to use for embeddings. Not the base model. Valid base model is text-embedding-ada-002
+
+            # EMBEDDING_ENGINE='localai'
+            # EMBEDDING_BASE_PATH='http://localhost:8080/v1'
+            # EMBEDDING_MODEL_PREF='text-embedding-ada-002'
+            # EMBEDDING_MODEL_MAX_CHUNK_LENGTH=1000 # The max chunk size in chars a string to embed can be
+
+            # Enable all below if you are using vector database: Chroma.
+            # VECTOR_DB="chroma"
+            # CHROMA_ENDPOINT='http://host.docker.internal:8000'
+            # CHROMA_API_HEADER="X-Api-Key"
+            # CHROMA_API_KEY="sk-123abc"
+
+            # VECTOR_DB="pinecone"
+            # PINECONE_API_KEY=
+            # PINECONE_INDEX=
+
+            # VECTOR_DB="lancedb"
+
+            # VECTOR_DB="weaviate"
+            # WEAVIATE_ENDPOINT="http://localhost:8080"
+            # WEAVIATE_API_KEY=
+
+            # VECTOR_DB="qdrant"
+            # QDRANT_ENDPOINT="http://localhost:6333"
+            # QDRANT_API_KEY=
+
+            # VECTOR_DB="milvus"
+            # MILVUS_ADDRESS="http://localhost:19530"
+            # MILVUS_USERNAME=
+            # MILVUS_PASSWORD=
+
+            # VECTOR_DB="zilliz"
+            # ZILLIZ_ENDPOINT="https://sample.api.gcp-us-west1.zillizcloud.com"
+            # ZILLIZ_API_TOKEN=api-token-here
+
+            # VECTOR_DB="astra"
+            # ASTRA_DB_APPLICATION_TOKEN=
+            # ASTRA_DB_ENDPOINT=
+
+            # AUTH_TOKEN="hunter2" # This is the password to your application if remote hosting.
+            # DISABLE_TELEMETRY="false"
+
+            # Documentation on how to use https://github.com/kamronbatman/joi-password-complexity
+            # Default is only 8 char minimum
+            PASSWORDMINCHAR: 8
+            PASSWORDMAXCHAR: 250
+            PASSWORDLOWERCASE: 1
+            PASSWORDUPPERCASE: 1
+            PASSWORDNUMERIC: 1
+            PASSWORDSYMBOL: 1
+            PASSWORDREQUIREMENTS: 4
+
+persistence:
+  storage:
+    enabled: true
+    mountPath: "/app/server/storage"
+  hotdir:
+    enabled: true
+    mountPath: "/app/collector/hotdir"
+  outputs:
+    enabled: true
+    mountPath: "/app/collector/outputs"
+
+portal:
+  open:
+    enabled: true
diff --git a/incubator/anything-llm/0.0.2/questions.yaml b/incubator/anything-llm/0.0.3/questions.yaml
similarity index 86%
rename from incubator/anything-llm/0.0.2/questions.yaml
rename to incubator/anything-llm/0.0.3/questions.yaml
index d00561f99d4..3089859a875 100644
--- a/incubator/anything-llm/0.0.2/questions.yaml
+++ b/incubator/anything-llm/0.0.3/questions.yaml
@@ -132,6 +132,384 @@ questions:
           label: Arg
           schema:
             type: string
+
+  - variable: env
+    label: Image Environment
+    schema:
+      additional_attrs: true
+      type: dict
+      attrs:
+        - variable: LLM_PROVIDER
+          label: LLM Provider
+          schema:
+            type: string
+            required: true
+            default: default
+            enum:
+              - value: ""
+                description: Default
+              - value: openai
+                description: OpenAI
+              - value: gemini
+                description: Gemini
+              - value: azure
+                description: Azure
+              - value: anthropic
+                description: Anthropic
+              - value: lmstudio
+                description: LMStudio
+              - value: localai
+                description: LocalAI
+              - value: ollama
+                description: Ollama
+              - value: togetherai
+                description: TogetherAI
+              - value: mistral
+                description: Mistral
+              - value: huggingface
+                description: HuggingFace
+        - variable: OPEN_AI_KEY
+          label: Open AI Key
+          schema:
+            type: string
+            default: ""
+            show_if: [[LLM_PROVIDER, "=", "openai"]]
+        - variable: GEMINI_API_KEY
+          label: Gemini AI Key
+          schema:
+            type: string
+            default: ""
+            show_if: [[LLM_PROVIDER, "=", "gemini"]]
+        - variable: GEMINI_LLM_MODEL_PREF
+          label: Gemini LLM Model Pref
+          schema:
+            type: string
+            default: "gemini-pro"
+            show_if: [[LLM_PROVIDER, "=", "gemini"]]
+        - variable: AZURE_OPENAI_KEY
+          label: Azure OpenAI Key
+          schema:
+            type: string
+            default: ""
+            show_if: [[LLM_PROVIDER, "=", "azure"]]
+        - variable: AZURE_OPENAI_ENDPOINT
+          label: Azure OpenAI Endpoint
+          schema:
+            type: string
+            default: ""
+            show_if: [[LLM_PROVIDER, "=", "azure"]]
+        - variable: ANTHROPIC_API_KEY
+          label: Anthropic API Key
+          schema:
+            type: string
+            default: ""
+            show_if: [[LLM_PROVIDER, "=", "anthropic"]]
+        - variable: ANTHROPIC_MODEL_PREF
+          label: Anthropic Model Pref
+          schema:
+            type: string
+            default: "claude-2"
+            show_if: [[LLM_PROVIDER, "=", "anthropic"]]
+        - variable: LMSTUDIO_BASE_PATH
+          label: LMStudio Base Path
+          schema:
+            type: string
+            default: ""
+            show_if: [[LLM_PROVIDER, "=", "lmstudio"]]
+        - variable: LMSTUDIO_MODEL_TOKEN_LIMIT
+          label: LMStudio Model Token Limit
+          schema:
+            type: int
+            default: 4096
+            show_if: [[LLM_PROVIDER, "=", "lmstudio"]]
+        - variable: LOCAL_AI_API_KEY
+          label: Local AI API Key
+          schema:
+            type: string
+            default: "sk-123abc"
+            show_if: [[LLM_PROVIDER, "=", "localai"]]
+        - variable: LOCAL_AI_MODEL_TOKEN_LIMIT
+          label: Local AI Model Token Limit
+          schema:
+            type: int
+            default: 4096
+            show_if: [[LLM_PROVIDER, "=", "localai"]]
+        - variable: LOCAL_AI_BASE_PATH
+          label: Local AI Base Path
+          schema:
+            type: string
+            default: ""
+            show_if: [[LLM_PROVIDER, "=", "localai"]]
+        - variable: LOCAL_AI_MODEL_PREF
+          label: Local AI Model Pref
+          schema:
+            type: string
+            default: "luna-ai-llama2"
+            show_if: [[LLM_PROVIDER, "=", "localai"]]
+        - variable: OLLAMA_BASE_PATH
+          label: Ollama Base Path
+          schema:
+            type: string
+            default: ""
+            show_if: [[LLM_PROVIDER, "=", "ollama"]]
+        - variable: OLLAMA_MODEL_PREF
+          label: Ollama Model Pref
+          schema:
+            type: string
+            default: "llama2"
+            show_if: [[LLM_PROVIDER, "=", "ollama"]]
+        - variable: OLLAMA_MODEL_TOKEN_LIMIT
+          label: Ollama Model Token Limit
+          schema:
+            type: int
+            default: 4096
+            show_if: [[LLM_PROVIDER, "=", "ollama"]]
+        - variable: TOGETHER_AI_API_KEY
+          label: Together AI API Key
+          schema:
+            type: string
+            default: ""
+            show_if: [[LLM_PROVIDER, "=", "togetherai"]]
+        - variable: TOGETHER_AI_MODEL_PREF
+          label: Together AI Model Pref
+          schema:
+            type: string
+            default: "mistralai/Mixtral-8x7B-Instruct-v0.1"
+            show_if: [[LLM_PROVIDER, "=", "togetherai"]]
+        - variable: MISTRAL_API_KEY
+          label: Mistral API Key
+          schema:
+            type: string
+            default: ""
+            show_if: [[LLM_PROVIDER, "=", "mistral"]]
+        - variable: MISTRAL_MODEL_PREF
+          label: Mistral Model Pref
+          schema:
+            type: string
+            default: "mistral-tiny"
+            show_if: [[LLM_PROVIDER, "=", "mistral"]]
+        - variable: HUGGING_FACE_LLM_API_KEY
+          label: Hugging Face LLM API Key
+          schema:
+            type: string
+            default: ""
+            show_if: [[LLM_PROVIDER, "=", "huggingface"]]
+        - variable: HUGGING_FACE_LLM_TOKEN_LIMIT
+          label: Hugging Face LLM Token Limit
+          schema:
+            type: int
+            default: 8000
+            show_if: [[LLM_PROVIDER, "=", "huggingface"]]
+        - variable: HUGGING_FACE_LLM_ENDPOINT
+          label: Hugging Face LLM Endpoint
+          schema:
+            type: string
+            default: ""
+            show_if: [[LLM_PROVIDER, "=", "huggingface"]]
+        - variable: OPEN_MODEL_PREF
+          label: Open Model Pref
+          schema:
+            type: string
+            default: ""
+        - variable: EMBEDDING_ENGINE
+          label: Embedding Engine
+          schema:
+            type: string
+            required: true
+            default: default
+            enum:
+              - value: ""
+                description: Default
+              - value: openai
+                description: OpenAI
+              - value: azure
+                description: Azure
+              - value: localai
+                description: LocalAI
+        - variable: EMBEDDING_MODEL_PREF
+          label: Embedding Model Pref
+          schema:
+            type: string
+            default: ""
+        - variable: EMBEDDING_BASE_PATH
+          label: Embedding Base Path
+          schema:
+            type: string
+            default: ""
+        - variable: EMBEDDING_MODEL_MAX_CHUNK_LENGTH
+          label: Embedding Model Max Chunk Length
+          schema:
+            type: int
+            default: 1000
+        - variable: VECTOR_DB
+          label: Vector DB
+          schema:
+            type: string
+            required: true
+            default: default
+            enum:
+              - value: ""
+                description: Default
+              - value: chroma
+                description: Chroma
+              - value: pinecone
+                description: PineCone
+              - value: lancedb
+                description: LanceDB
+              - value: weaviate
+                description: Weaviate
+              - value: qdrant
+                description: Qdrant
+              - value: milvus
+                description: Milvus
+              - value: zilliz
+                description: Zilliz
+              - value: astra
+                description: Astra DB
+        - variable: CHROMA_API_KEY
+          label: Chroma API Key
+          schema:
+            type: string
+            default: "sk-123abc"
+            show_if: [[VECTOR_DB, "=", "chroma"]]
+        - variable: CHROMA_API_HEADER
+          label: Chroma API Header
+          schema:
+            type: string
+            default: "X-Api-Key"
+            show_if: [[VECTOR_DB, "=", "chroma"]]
+        - variable: CHROMA_ENDPOINT
+          label: Chroma Endpoint
+          schema:
+            type: string
+            default: ""
+            show_if: [[VECTOR_DB, "=", "chroma"]]
+        - variable: PINECONE_API_KEY
+          label: PineCone API Key
+          schema:
+            type: string
+            default: ""
+            show_if: [[VECTOR_DB, "=", "pinecone"]]
+        - variable: PINECONE_INDEX
+          label: PineCone Index
+          schema:
+            type: string
+            default: ""
+            show_if: [[VECTOR_DB, "=", "pinecone"]]
+        - variable: WEAVIATE_API_KEY
+          label: Weaviate API Key
+          schema:
+            type: string
+            default: ""
+            show_if: [[VECTOR_DB, "=", "weaviate"]]
+        - variable: WEAVIATE_ENDPOINT
+          label: Weaviate Endpoint
+          schema:
+            type: string
+            default: ""
+            show_if: [[VECTOR_DB, "=", "weaviate"]]
+        - variable: QDRANT_API_KEY
+          label: Qdrant API Key
+          schema:
+            type: string
+            default: ""
+            show_if: [[VECTOR_DB, "=", "qdrant"]]
+        - variable: QDRANT_ENDPOINT
+          label: Qdrant Endpoint
+          schema:
+            type: string
+            default: ""
+            show_if: [[VECTOR_DB, "=", "qdrant"]]
+        - variable: MILVUS_ADDRESS
+          label: Milvus Address
+          schema:
+            type: string
+            default: ""
+            show_if: [[VECTOR_DB, "=", "milvus"]]
+        - variable: MILVUS_USERNAME
+          label: Milvus UserName
+          schema:
+            type: string
+            default: ""
+            show_if: [[VECTOR_DB, "=", "milvus"]]
+        - variable: MILVUS_PASSWORD
+          label: Milvus Password
+          schema:
+            type: string
+            default: ""
+            show_if: [[VECTOR_DB, "=", "milvus"]]
+        - variable: ZILLIZ_API_TOKEN
+          label: Zilliz API Token
+          schema:
+            type: string
+            default: ""
+            show_if: [[VECTOR_DB, "=", "zilliz"]]
+        - variable: ZILLIZ_ENDPOINT
+          label: Zilliz EndPoint
+          schema:
+            type: string
+            default: ""
+            show_if: [[VECTOR_DB, "=", "zilliz"]]
+        - variable: ASTRA_DB_APPLICATION_TOKEN
+          label: Astra DB Application Token
+          schema:
+            type: string
+            default: ""
+            show_if: [[VECTOR_DB, "=", "astra"]]
+        - variable: ASTRA_DB_ENDPOINT
+          label: Astra DB EndPoint
+          schema:
+            type: string
+            default: ""
+            show_if: [[VECTOR_DB, "=", "astra"]]
+        - variable: adv_settings
+          label: Show Advanced Settings
+          schema:
+            type: boolean
+            default: false
+            show_subquestions_if: true
+            subquestions:
+              - variable: DISABLE_TELEMETRY
+                label: Disable Telemetry
+                schema:
+                  type: boolean
+                  default: false
+              - variable: PASSWORDMINCHAR
+                label: Password Min Char
+                schema:
+                  type: int
+                  default: 8
+              - variable: PASSWORDMAXCHAR
+                label: Password Max Char
+                schema:
+                  type: int
+                  default: 250
+              - variable: PASSWORDLOWERCASE
+                label: Password LowerCase
+                schema:
+                  type: int
+                  default: 1
+              - variable: PASSWORDUPPERCASE
+                label: Password UpperCase
+                schema:
+                  type: int
+                  default: 1
+              - variable: PASSWORDNUMERIC
+                label: Password Numeric
+                schema:
+                  type: int
+                  default: 1
+              - variable: PASSWORDSYMBOL
+                label: Password Symbol
+                schema:
+                  type: int
+                  default: 1
+              - variable: PASSWORDREQUIREMENTS
+                label: Password Requirements
+                schema:
+                  type: int
+                  default: 4
+
   - variable: advanced
     label: Show Advanced Settings
     description: Advanced settings are not covered by TrueCharts Support
@@ -1955,13 +2333,6 @@ questions:
           show_if: [["certificateIssuer", "=", ""]]
           type: string
          default: ""
-      - variable: scaleCert
-        label: 'Use TrueNAS SCALE Certificate (Deprecated)'
-        schema:
-          show_if: [["certificateIssuer", "=", ""]]
-          type: int
-          $ref:
-            - "definitions/certificate"
   - variable: ingressList
     label: Add Manual Custom Ingresses
     group: Ingress
@@ -2079,13 +2450,6 @@ questions:
           schema:
             type: string
             default: ""
-        - variable: scaleCert
-          label: Use TrueNAS SCALE Certificate (Deprecated)
-          schema:
-            show_if: [["certificateIssuer", "=", ""]]
-            type: int
-            $ref:
-              - "definitions/certificate"
         - variable: clusterCertificate
           label: 'Cluster Certificate (Advanced)'
           description: 'Add the name of your cluster-wide certificate, that you set up in the ClusterIssuer chart.'
@@ -2329,6 +2693,21 @@ questions:
                   type: string
                   default: 8Gi
                   valid_chars: '^(?!^0(e[0-9]|[EPTGMK]i?|)$)([0-9]+)(|[EPTGMK]i?|e[0-9]+)$'
+              - variable: 'intel.com/i915'
+                label: Add Intel i915 GPUs
+                schema:
+                  type: int
+                  default: 0
+              - variable: 'nvidia.com/gpu'
+                label: Add NVIDIA GPUs (Experimental)
+                schema:
+                  type: int
+                  default: 0
+              - variable: 'amd.com/gpu'
+                label: Add AMD GPUs
+                schema:
+                  type: int
+                  default: 0
         - variable: requests
           label: "Minimum Resources Required (request)"
           schema:
diff --git a/incubator/anything-llm/0.0.2/templates/NOTES.txt b/incubator/anything-llm/0.0.3/templates/NOTES.txt
similarity index 100%
rename from incubator/anything-llm/0.0.2/templates/NOTES.txt
rename to incubator/anything-llm/0.0.3/templates/NOTES.txt
diff --git a/incubator/anything-llm/0.0.3/templates/_secrets.tpl b/incubator/anything-llm/0.0.3/templates/_secrets.tpl
new file mode 100644
index 00000000000..f03181ae0bb
--- /dev/null
+++ b/incubator/anything-llm/0.0.3/templates/_secrets.tpl
@@ -0,0 +1,13 @@
+{{/* Define the secrets */}}
+{{- define "anythinglmm.secrets" -}}
+{{- $secretName := (printf "%s-anythinglmm-secrets" (include "tc.v1.common.lib.chart.names.fullname" $)) }}
+
+{{- $jwtSecret := randAlphaNum 64 -}}
+
+  {{- with lookup "v1" "Secret" .Release.Namespace $secretName -}}
+    {{- $jwtSecret = index .data "JWT_SECRET" | b64dec -}}
+  {{- end }}
+enabled: true
+data:
+  JWT_SECRET: {{ $jwtSecret }}
+{{- end -}}
diff --git a/incubator/anything-llm/0.0.3/templates/common.yaml b/incubator/anything-llm/0.0.3/templates/common.yaml
new file mode 100644
index 00000000000..bbd43da7253
--- /dev/null
+++ b/incubator/anything-llm/0.0.3/templates/common.yaml
@@ -0,0 +1,11 @@
+{{/* Make sure all variables are set properly */}}
+{{- include "tc.v1.common.loader.init" . -}}
+
+{{/* Render secrets for anythinglmm */}}
+{{- $secrets := include "anythinglmm.secrets" . | fromYaml -}}
+{{- if $secrets -}}
+  {{- $_ := set .Values.secret "anythinglmm-secrets" $secrets -}}
+{{- end -}}
+
+{{/* Render the templates */}}
+{{- include "tc.v1.common.loader.apply" . -}}
diff --git a/incubator/anything-llm/0.0.2/values.yaml b/incubator/anything-llm/0.0.3/values.yaml
similarity index 100%
rename from incubator/anything-llm/0.0.2/values.yaml
rename to incubator/anything-llm/0.0.3/values.yaml