Spaces:
Sleeping
Sleeping
| # Use .env.local to change these variables | |
| # DO NOT EDIT THIS FILE WITH SENSITIVE DATA | |
| ### Models ### | |
| # Models are sourced exclusively from an OpenAI-compatible base URL. | |
| # Example: https://router.huggingface.co/v1 | |
| OPENAI_BASE_URL=https://router.huggingface.co/v1 | |
| # Canonical auth token for any OpenAI-compatible provider | |
| OPENAI_API_KEY=#your provider API key (works for HF router, OpenAI, LM Studio, etc.). | |
| # When set to true, user token will be used for inference calls | |
| USE_USER_TOKEN=false | |
| # Automatically redirect to oauth login page if user is not logged in, when set to "true" | |
| AUTOMATIC_LOGIN=false | |
| ### MongoDB ### | |
| MONGODB_URL=#your mongodb URL here, use chat-ui-db image if you don't want to set this | |
| MONGODB_DB_NAME=chat-ui | |
| MONGODB_DIRECT_CONNECTION=false | |
| ## Public app configuration ## | |
| PUBLIC_APP_NAME=ChatUI # name used as title throughout the app | |
| PUBLIC_APP_ASSETS=chatui # used to find logos & favicons in static/$PUBLIC_APP_ASSETS | |
| PUBLIC_APP_DESCRIPTION="Making the community's best AI chat models available to everyone." # description used throughout the app | |
| PUBLIC_SMOOTH_UPDATES=false # set to true to enable smoothing of messages client-side, can be CPU intensive | |
| PUBLIC_ORIGIN= | |
| PUBLIC_SHARE_PREFIX= | |
| PUBLIC_GOOGLE_ANALYTICS_ID= | |
| PUBLIC_PLAUSIBLE_SCRIPT_URL= | |
| PUBLIC_APPLE_APP_ID= | |
| COUPLE_SESSION_WITH_COOKIE_NAME= # NOTE(review): duplicate — also defined under "### Cookies" below; confirm which definition takes effect and keep only one | |
| # when OpenID is configured (OPENID_CLIENT_ID set), users are required to log in after the welcome modal | |
| OPENID_CLIENT_ID="" # You can set to "__CIMD__" for automatic oauth app creation when deployed | |
| OPENID_CLIENT_SECRET= | |
| OPENID_SCOPES="openid profile inference-api" | |
| USE_USER_TOKEN= # NOTE(review): duplicate of USE_USER_TOKEN above — confirm which definition the dotenv parser keeps | |
| AUTOMATIC_LOGIN=# if true authentication is required on all routes. NOTE(review): duplicate of AUTOMATIC_LOGIN above | |
| ### Local Storage ### | |
| MONGO_STORAGE_PATH= # where is the db folder stored | |
| ## Models overrides | |
| MODELS= | |
| ## Task model | |
| # Optional: set to the model id/name from the `${OPENAI_BASE_URL}/models` list | |
| # to use for internal tasks (title summarization, etc). If not set, the current model will be used | |
| TASK_MODEL= | |
| # Arch router (OpenAI-compatible) endpoint base URL used for route selection | |
| # Example: https://api.openai.com/v1 or your hosted Arch endpoint | |
| LLM_ROUTER_ARCH_BASE_URL= | |
| ## LLM Router Configuration | |
| # Path to routes policy (JSON array). Defaults to llm-router/routes.chat.json | |
| LLM_ROUTER_ROUTES_PATH= | |
| # Model used at the Arch router endpoint for selection | |
| LLM_ROUTER_ARCH_MODEL= | |
| # Fallback behavior | |
| # Route to map "other" to (must exist in routes file) | |
| LLM_ROUTER_OTHER_ROUTE=casual_conversation | |
| # Model to call if the Arch selection fails entirely | |
| LLM_ROUTER_FALLBACK_MODEL= | |
| # Arch selection timeout in milliseconds (default 10000) | |
| LLM_ROUTER_ARCH_TIMEOUT_MS=10000 | |
| # Maximum length (in characters) for assistant messages sent to router for route selection (default 500) | |
| LLM_ROUTER_MAX_ASSISTANT_LENGTH=500 | |
| # Maximum length (in characters) for previous user messages sent to router (latest user message not trimmed, default 400) | |
| LLM_ROUTER_MAX_PREV_USER_LENGTH=400 | |
| # Enable router multimodal fallback (set to true to allow image inputs via router) | |
| LLM_ROUTER_ENABLE_MULTIMODAL=false | |
| # Optional: specific model to use for multimodal requests. If not set, uses first multimodal model | |
| LLM_ROUTER_MULTIMODAL_MODEL= | |
| # Router UI overrides (client-visible) | |
| # Public display name for the router entry in the model list. Defaults to "Omni". | |
| PUBLIC_LLM_ROUTER_DISPLAY_NAME=Omni | |
| # Optional: public logo URL for the router entry. If unset, the UI shows a Carbon icon. | |
| PUBLIC_LLM_ROUTER_LOGO_URL= | |
| # Public alias id used for the virtual router model (Omni). Defaults to "omni". | |
| PUBLIC_LLM_ROUTER_ALIAS_ID=omni | |
| ### Authentication ### | |
| # Parameters to enable open id login | |
| OPENID_CONFIG= | |
| # If it's defined, only these emails will be allowed to log in | |
| ALLOWED_USER_EMAILS=[] | |
| # If it's defined, users with emails matching these domains will also be allowed to log in | |
| ALLOWED_USER_DOMAINS=[] | |
| # valid alternative redirect URLs for OAuth, used for HuggingChat apps | |
| ALTERNATIVE_REDIRECT_URLS=[] | |
| ### Cookies | |
| # name of the cookie used to store the session | |
| COOKIE_NAME=hf-chat | |
| # If the value of this cookie changes, the session is destroyed. Useful if chat-ui is deployed on a subpath | |
| # of your domain, and you want chat ui sessions to reset if the user's auth changes | |
| COUPLE_SESSION_WITH_COOKIE_NAME= | |
| # specify secure behaviour for cookies | |
| COOKIE_SAMESITE=# can be "lax", "strict", "none" or left empty | |
| COOKIE_SECURE=# set to true to only allow cookies over https | |
| TRUSTED_EMAIL_HEADER=# header to use to get the user email, only use if you know what you are doing | |
| ### Admin stuff ### | |
| ADMIN_CLI_LOGIN=true # set to false to disable the CLI login | |
| ADMIN_TOKEN=#We recommend leaving this empty; you can get the token from the terminal. | |
| ### Feature Flags ### | |
| LLM_SUMMARIZATION=true # generate conversation titles with LLMs | |
| ALLOW_IFRAME=true # Allow the app to be embedded in an iframe | |
| ENABLE_DATA_EXPORT=true | |
| ### Rate limits ### | |
| # See `src/lib/server/usageLimits.ts` | |
| # { | |
| # conversations: number, # how many conversations | |
| # messages: number, # how many messages in a conversation | |
| # assistants: number, # how many assistants | |
| # messageLength: number, # how long can a message be before we cut it off | |
| # messagesPerMinute: number, # how many messages per minute | |
| # tools: number # how many tools | |
| # } | |
| USAGE_LIMITS={} | |
| ### HuggingFace specific ### | |
| ## Feature flag & admin settings | |
| # Used for setting early access & admin flags to users | |
| HF_ORG_ADMIN= | |
| HF_ORG_EARLY_ACCESS= | |
| WEBHOOK_URL_REPORT_ASSISTANT=#provide slack webhook url to get notified for reports/feature requests | |
| ### Metrics ### | |
| METRICS_ENABLED=false | |
| METRICS_PORT=5565 | |
| LOG_LEVEL=info | |
| ### Parquet export ### | |
| # No longer in use, but useful for exporting conversations to a parquet file as a Hugging Face dataset | |
| PARQUET_EXPORT_DATASET= | |
| PARQUET_EXPORT_HF_TOKEN= | |
| ADMIN_API_SECRET=# secret to admin API calls, like computing usage stats or exporting parquet data | |
| ### Config ### | |
| ENABLE_CONFIG_MANAGER=true | |
| ### Docker build variables ### | |
| # These values cannot be updated at runtime | |
| # They need to be passed when building the docker image | |
| # See https://github.com/huggingface/chat-ui/blob/main/.github/workflows/deploy-prod.yml#L44-L47 | |
| APP_BASE="" # base path of the app, e.g. /chat, left blank as default | |
| ### Body size limit for SvelteKit https://svelte.dev/docs/kit/adapter-node#Environment-variables-BODY_SIZE_LIMIT | |
| BODY_SIZE_LIMIT=15728640 | |
| PUBLIC_COMMIT_SHA= | |
| ### LEGACY parameters | |
| ALLOW_INSECURE_COOKIES=false # LEGACY! Use COOKIE_SECURE and COOKIE_SAMESITE instead | |
| PARQUET_EXPORT_SECRET=#DEPRECATED, use ADMIN_API_SECRET instead | |
| RATE_LIMIT= # /!\ DEPRECATED definition of messages per minute. Use USAGE_LIMITS.messagesPerMinute instead | |
| OPENID_NAME_CLAIM="name" # Change to "username" for some providers that do not provide name | |
| OPENID_PROVIDER_URL=https://huggingface.co # for Google, use https://accounts.google.com | |
| OPENID_TOLERANCE= | |
| OPENID_RESOURCE= | |
| EXPOSE_API=# deprecated, API is now always exposed | |