Fix max_tokens calculation to respect model context window (5000 tokens)

- Add dynamic max_tokens calculation based on input size
- Add novita_model_context_window configuration
- Prevent 400 errors when input tokens exceed the available output space
Files changed:
- .env (+163 -0)
- ENV_EXAMPLE_CONTENT.txt (+5 -0)
- src/config.py (+13 -0)
- src/llm_router.py (+39 -1)
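To see why the previous behaviour produced 400 errors, consider illustrative numbers (the 4096 figure matches the fallback default the router uses for max_tokens; the prompt size is made up): on a 5000-token deployment, a ~4000-token prompt plus a fixed 4096-token completion budget asks for more than the window holds, so the API rejects the request before generating anything. A minimal standalone sketch of that overflow check:

# Illustrative numbers only: why a fixed max_tokens overflowed a
# 5000-token context window and triggered 400 responses.
CONTEXT_WINDOW = 5000      # NOVITA_MODEL_CONTEXT_WINDOW
input_tokens = 4000        # estimated prompt size (hypothetical)
fixed_max_tokens = 4096    # static completion budget used before this fix

total_requested = input_tokens + fixed_max_tokens
print(f"{total_requested} tokens requested vs {CONTEXT_WINDOW} available")  # 8096 vs 5000
print("overflow:", total_requested > CONTEXT_WINDOW)  # True -> request rejected with 400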
.env
ADDED
@@ -0,0 +1,163 @@
+# =============================================================================
+# Research AI Assistant API - Environment Configuration
+# =============================================================================
+# Copy this content to a file named .env and fill in your actual values
+# Never commit .env to version control!
+
+# =============================================================================
+# Novita AI Configuration (REQUIRED)
+# =============================================================================
+# Get your API key from: https://novita.ai
+NOVITA_API_KEY=sk_gaMaeJaUy-qQxms1NIgJuov_RotL_NZXMoQbJlNhS6M
+
+# Dedicated endpoint base URL (default for dedicated endpoints)
+NOVITA_BASE_URL=https://api.novita.ai/dedicated/v1/openai
+
+# Your dedicated endpoint model ID
+# Format: model-name:endpoint-id
+NOVITA_MODEL=deepseek-ai/DeepSeek-R1-Distill-Qwen-7B:de-1a706eeafbf3ebc2
+
+# =============================================================================
+# DeepSeek-R1 Optimized Settings
+# =============================================================================
+# Temperature: 0.5-0.7 range (0.6 recommended for DeepSeek-R1)
+DEEPSEEK_R1_TEMPERATURE=0.6
+
+# Force reasoning trigger: Enable to ensure DeepSeek-R1 uses reasoning pattern
+# Set to True to add `<think>` prefix for reasoning tasks
+DEEPSEEK_R1_FORCE_REASONING=True
+
+# =============================================================================
+# Token Allocation Configuration
+# =============================================================================
+# Maximum tokens dedicated for user input (prioritized over context)
+# Recommended: 8000 tokens for large queries
+USER_INPUT_MAX_TOKENS=8000
+
+# Maximum tokens for context preparation (includes user input + context)
+# Recommended: 28000 tokens for 32K context window models
+CONTEXT_PREPARATION_BUDGET=28000
+
+# Context pruning threshold (should match context_preparation_budget)
+CONTEXT_PRUNING_THRESHOLD=28000
+
+# Always prioritize user input over historical context
+PRIORITIZE_USER_INPUT=True
+
+# =============================================================================
+# Database Configuration
+# =============================================================================
+# SQLite database path (default: sessions.db)
+# Use /tmp/ for Docker/containerized environments
+DB_PATH=sessions.db
+
+# FAISS index path for embeddings (default: embeddings.faiss)
+FAISS_INDEX_PATH=embeddings.faiss
+
+# =============================================================================
+# Cache Configuration
+# =============================================================================
+# HuggingFace cache directory (for any remaining model downloads)
+HF_HOME=~/.cache/huggingface
+TRANSFORMERS_CACHE=~/.cache/huggingface
+
+# HuggingFace token (optional - only needed if using gated models)
+HF_TOKEN=
+
+# Cache TTL in seconds (default: 3600 = 1 hour)
+CACHE_TTL=3600
+
+# =============================================================================
+# Session Configuration
+# =============================================================================
+# Session timeout in seconds (default: 3600 = 1 hour)
+SESSION_TIMEOUT=3600
+
+# Maximum session size in megabytes (default: 10 MB)
+MAX_SESSION_SIZE_MB=10
+
+# =============================================================================
+# Performance Configuration
+# =============================================================================
+# Maximum worker threads for parallel processing (default: 4)
+MAX_WORKERS=4
+
+# =============================================================================
+# Mobile Optimization
+# =============================================================================
+# Maximum tokens for mobile responses (default: 1200)
+# Increased from 800 to allow better responses on mobile
+MOBILE_MAX_TOKENS=1200
+
+# Mobile request timeout in milliseconds (default: 15000)
+MOBILE_TIMEOUT=15000
+
+# =============================================================================
+# API Configuration
+# =============================================================================
+# Flask/Gradio server port (default: 7860)
+GRADIO_PORT=7860
+
+# Server host (default: 0.0.0.0 for all interfaces)
+GRADIO_HOST=0.0.0.0
+
+# =============================================================================
+# Logging Configuration
+# =============================================================================
+# Logging level: DEBUG, INFO, WARNING, ERROR, CRITICAL (default: INFO)
+LOG_LEVEL=INFO
+
+# Log format: json or text (default: json)
+LOG_FORMAT=json
+
+# Log directory (default: /tmp/logs)
+LOG_DIR=/tmp/logs
+
+# =============================================================================
+# Context Configuration
+# =============================================================================
+# Maximum context tokens (default: 4000)
+# Note: This is overridden by CONTEXT_PREPARATION_BUDGET if set
+MAX_CONTEXT_TOKENS=4000
+
+# Cache TTL for context in seconds (default: 300 = 5 minutes)
+CACHE_TTL_SECONDS=300
+
+# Maximum cache size (default: 100)
+MAX_CACHE_SIZE=100
+
+# Enable parallel processing (default: True)
+PARALLEL_PROCESSING=True
+
+# Context decay factor (default: 0.8)
+CONTEXT_DECAY_FACTOR=0.8
+
+# Maximum interactions to keep in context (default: 10)
+MAX_INTERACTIONS_TO_KEEP=10
+
+# Enable metrics collection (default: True)
+ENABLE_METRICS=True
+
+# Enable context compression (default: True)
+COMPRESSION_ENABLED=True
+
+# Summarization threshold in tokens (default: 2000)
+SUMMARIZATION_THRESHOLD=2000
+
+# =============================================================================
+# Model Selection (for context operations - if still using local models)
+# =============================================================================
+# These are optional and only used if local models are still needed
+# for context summarization or other operations
+CONTEXT_SUMMARIZATION_MODEL=Qwen/Qwen2.5-7B-Instruct
+CONTEXT_INTENT_MODEL=Qwen/Qwen2.5-7B-Instruct
+CONTEXT_SYNTHESIS_MODEL=Qwen/Qwen2.5-7B-Instruct
+
+# =============================================================================
+# Security Notes
+# =============================================================================
+# - Never commit .env file to version control
+# - Keep API keys secret and rotate them regularly
+# - Use environment variables in production (not .env files)
+# - Set proper file permissions: chmod 600 .env
+
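The application reads these values through the pydantic Settings class in src/config.py (diff further below). As a quick way to inspect a local .env outside the app, here is a hedged sketch assuming python-dotenv is available; the package and the checks are illustrative, not part of this commit:

# Hypothetical sanity check for a local .env file; python-dotenv is assumed
# to be installed and is not added by this commit.
import os
from dotenv import load_dotenv

load_dotenv(".env")  # populate os.environ from the .env file

window = int(os.environ.get("NOVITA_MODEL_CONTEXT_WINDOW", "5000"))
mobile_max = int(os.environ.get("MOBILE_MAX_TOKENS", "1200"))

# Mobile completions must leave room for the prompt inside the model window.
assert mobile_max < window, "MOBILE_MAX_TOKENS should be well below the context window"
print(f"context window: {window}, mobile max tokens: {mobile_max}")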
ENV_EXAMPLE_CONTENT.txt
CHANGED
@@ -44,6 +44,11 @@ CONTEXT_PRUNING_THRESHOLD=28000
 # Always prioritize user input over historical context
 PRIORITIZE_USER_INPUT=True
 
+# Model context window (actual limit for your deployed model)
+# Default: 5000 tokens (adjust based on your Novita AI deployment)
+# This is the maximum total tokens (input + output) the model can handle
+NOVITA_MODEL_CONTEXT_WINDOW=5000
+
 # =============================================================================
 # Database Configuration
 # =============================================================================
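Because NOVITA_MODEL_CONTEXT_WINDOW caps input and output together, the larger budgets documented earlier in this file (USER_INPUT_MAX_TOKENS=8000, CONTEXT_PREPARATION_BUDGET=28000) can exceed a 5000-token deployment on their own, which is exactly the case the router-side clamp below handles. A small illustrative calculation using the documented values (the helper name is made up):

# Illustrative only: compare documented token budgets against the model window.
NOVITA_MODEL_CONTEXT_WINDOW = 5000
USER_INPUT_MAX_TOKENS = 8000

def output_budget(input_tokens: int, window: int = NOVITA_MODEL_CONTEXT_WINDOW) -> int:
    """Tokens left for the completion once the prompt has been counted."""
    return max(0, window - input_tokens)

print(output_budget(3000))                   # 2000 tokens left for output
print(output_budget(USER_INPUT_MAX_TOKENS))  # 0 -> prompt alone exceeds the window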
src/config.py
CHANGED
@@ -232,6 +232,13 @@ class Settings(BaseSettings):
         env="PRIORITIZE_USER_INPUT"
     )
 
+    # Model Context Window Configuration
+    novita_model_context_window: int = Field(
+        default=5000,
+        description="Maximum context window for Novita AI model (input + output tokens)",
+        env="NOVITA_MODEL_CONTEXT_WINDOW"
+    )
+
     @validator("novita_api_key", pre=True)
     def validate_novita_api_key(cls, v):
         """Validate and clean Novita API key"""
@@ -266,6 +273,12 @@ class Settings(BaseSettings):
         val = int(v) if v else 28000
         return max(4000, min(120000, val))
 
+    @validator("novita_model_context_window", pre=True)
+    def validate_context_window(cls, v):
+        """Validate context window size"""
+        val = int(v) if v else 5000
+        return max(1000, min(200000, val))  # Reasonable bounds
+
     # ==================== Model Configuration ====================
 
     default_model: str = Field(
src/llm_router.py
CHANGED
@@ -108,7 +108,7 @@ class LLMRouter:
         model_name = kwargs.get('model', self.settings.novita_model)
 
         # Get optimized parameters
-
+        requested_max_tokens = kwargs.get('max_tokens', model_config.get('max_tokens', 4096))
         temperature = kwargs.get('temperature',
             model_config.get('temperature', self.settings.deepseek_r1_temperature))
         top_p = kwargs.get('top_p', model_config.get('top_p', 0.95))
@@ -117,6 +117,9 @@ class LLMRouter:
         # Format prompt according to DeepSeek-R1 best practices
         formatted_prompt = self._format_deepseek_r1_prompt(prompt, task_type, model_config)
 
+        # IMPORTANT: Calculate safe max_tokens based on input size
+        max_tokens = self._calculate_safe_max_tokens(formatted_prompt, requested_max_tokens)
+
         # IMPORTANT: No system prompt - all instructions in user prompt
         messages = [{"role": "user", "content": formatted_prompt}]
 
@@ -164,6 +167,41 @@ class LLMRouter:
             logger.error(f"Error calling Novita AI API: {e}", exc_info=True)
             raise
 
+    def _calculate_safe_max_tokens(self, prompt: str, requested_max_tokens: int) -> int:
+        """
+        Calculate safe max_tokens based on input token count and model context window.
+
+        Args:
+            prompt: Input prompt text
+            requested_max_tokens: Desired max_tokens value
+
+        Returns:
+            int: Adjusted max_tokens that fits within context window
+        """
+        # Estimate input tokens (rough: 1 token ≈ 4 characters)
+        # For more accuracy, you could use tiktoken if available
+        input_tokens = len(prompt) // 4
+
+        # Get model context window
+        context_window = self.settings.novita_model_context_window
+
+        # Reserve minimum 100 tokens for safety margin
+        available_tokens = context_window - input_tokens - 100
+
+        # Use the smaller of requested or available
+        safe_max_tokens = min(requested_max_tokens, available_tokens)
+
+        # Ensure minimum of 50 tokens for output
+        safe_max_tokens = max(50, safe_max_tokens)
+
+        if safe_max_tokens < requested_max_tokens:
+            logger.warning(
+                f"Reduced max_tokens from {requested_max_tokens} to {safe_max_tokens} "
+                f"(input: ~{input_tokens} tokens, context window: {context_window} tokens)"
+            )
+
+        return safe_max_tokens
+
     def _format_deepseek_r1_prompt(self, prompt: str, task_type: str, model_config: dict) -> str:
         """
         Format prompt according to DeepSeek-R1 best practices:
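As a rough worked example of the helper above (a standalone restatement with hard-coded numbers; the real method reads the window from settings and logs a warning when it clamps): a 14,000-character prompt is estimated at ~3,500 tokens, so with a 5,000-token window and the 100-token margin at most 1,400 tokens remain for the completion, and a requested 4,096 is reduced to that.

# Standalone restatement of _calculate_safe_max_tokens with example numbers.
def calculate_safe_max_tokens(prompt: str, requested: int, context_window: int = 5000) -> int:
    input_tokens = len(prompt) // 4                   # ~4 characters per token heuristic
    available = context_window - input_tokens - 100   # keep a 100-token safety margin
    return max(50, min(requested, available))         # never request fewer than 50 output tokens

print(calculate_safe_max_tokens("x" * 14_000, requested=4096))    # 1400
print(calculate_safe_max_tokens("short prompt", requested=4096))  # 4096 (already fits)

Two caveats that follow from the code itself: the 4-characters-per-token estimate is a heuristic (the 100-token margin absorbs some of the error), and the 50-token floor means a prompt within roughly 150 tokens of the window can still overflow it, so very long inputs still need to be pruned before they reach the router.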