# edgellm/backend/config.py
"""
Configuration settings for the Edge LLM API
"""
import os
from dotenv import load_dotenv
# Load environment variables
load_dotenv()
# API configuration.
# Prefer the conventional UPPER_CASE environment variable names, but keep
# accepting the legacy lower-case names ("api_key"/"base_url") this project
# originally used, so existing deployments keep working.
API_KEY = os.getenv("API_KEY") or os.getenv("api_key", "")
BASE_URL = os.getenv("BASE_URL") or os.getenv("base_url", "https://aihubmix.com/v1")
# Catalogue of selectable models, keyed by model identifier.
def _model_entry(display_name, thinking, description, size, runtime):
    """Build one model-config record with the standard five fields."""
    return {
        "name": display_name,
        "supports_thinking": thinking,
        "description": description,
        "size_gb": size,
        "type": runtime,
    }

AVAILABLE_MODELS = {
    # API models (AiHubMix) - Prioritized first
    "Qwen/Qwen3-30B-A3B": _model_entry(
        "Qwen3-30B-A3B", True,
        "API: Qwen3 with dynamic thinking modes", "API", "api"),
    # Local models (for local development)
    "Qwen/Qwen3-4B-Thinking-2507": _model_entry(
        "Qwen3-4B-Thinking-2507", True,
        "Local: Shows thinking process", "~8GB", "local"),
    "Qwen/Qwen3-4B-Instruct-2507": _model_entry(
        "Qwen3-4B-Instruct-2507", False,
        "Local: Direct instruction following", "~8GB", "local"),
    "qwen2.5-vl-72b-instruct": _model_entry(
        "Qwen2.5-VL-72B-Instruct", False,
        "API: Multimodal model with vision", "API", "api"),
    "Qwen/QVQ-72B-Preview": _model_entry(
        "QVQ-72B-Preview", True,
        "API: Visual reasoning with thinking", "API", "api"),
}
# CORS settings — allow all origins (public HF Space, no credentialed auth).
CORS_ORIGINS = ["*"]  # Allow all origins for HF Space
# Static files directory - point directly to frontend build
FRONTEND_DIST_DIR = "frontend/dist"
ASSETS_DIR = "frontend/dist/assets"
# Server settings (port will be dynamically determined)
HOST = "0.0.0.0"
try:
    # 0 means auto-assign a free port; a malformed PORT value (e.g. an
    # empty string) falls back to auto-assign instead of crashing on import.
    DEFAULT_PORT = int(os.getenv("PORT", "0"))
except ValueError:
    DEFAULT_PORT = 0