Mirror of https://github.com/leonvanzyl/autocoder.git (synced 2026-03-16 18:33:08 +00:00)
refactor: make Settings UI the single source of truth for API provider
Remove legacy env-var-based provider/mode detection that caused misleading UI badges (e.g., GLM badge showing when Settings was set to Claude).

Key changes:
- Remove _is_glm_mode() and _is_ollama_mode() env-var sniffing functions from server/routers/settings.py; derive glm_mode/ollama_mode purely from the api_provider setting
- Remove `import os` from settings router (no longer needed)
- Update schema comments to reflect settings-based derivation
- Remove "(configured via .env)" from badge tooltips in App.tsx
- Remove Kimi/GLM/Ollama/Playwright-headless sections from .env.example; add note pointing to Settings UI
- Update CLAUDE.md and README.md documentation to reference Settings UI for alternative provider configuration
- Update model IDs from claude-opus-4-5-20251101 to claude-opus-4-6 across registry, client, chat sessions, tests, and UI defaults
- Add LEGACY_MODEL_MAP with auto-migration in get_all_settings()
- Show model ID subtitle in SettingsModal model selector
- Add Vertex passthrough test for claude-opus-4-6 (no date suffix)

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
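The badge flags are now a pure function of the stored `api_provider` setting rather than env-var sniffing. A minimal sketch of the derivation described above, using the setting and flag names that appear in the diffs below:

```python
# Sketch: derive the UI badge flags purely from the stored api_provider setting,
# mirroring the settings-router change in this commit (no ANTHROPIC_BASE_URL sniffing).
def derive_badge_flags(all_settings: dict[str, str]) -> dict[str, bool]:
    api_provider = all_settings.get("api_provider", "claude")
    return {
        "glm_mode": api_provider == "glm",
        "ollama_mode": api_provider == "ollama",
    }

# With Settings set to Claude, neither badge shows, regardless of any
# ANTHROPIC_BASE_URL that may still be exported in the environment.
assert derive_badge_flags({"api_provider": "claude"}) == {
    "glm_mode": False,
    "ollama_mode": False,
}
```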
.env.example (54 changed lines)

@@ -9,11 +9,6 @@
 # - webkit: Safari engine
 # - msedge: Microsoft Edge
 # PLAYWRIGHT_BROWSER=firefox
-#
-# PLAYWRIGHT_HEADLESS: Run browser without visible window
-# - true: Browser runs in background, saves CPU (default)
-# - false: Browser opens a visible window (useful for debugging)
-# PLAYWRIGHT_HEADLESS=true

 # Extra Read Paths (Optional)
 # Comma-separated list of absolute paths for read-only access to external directories.

@@ -25,56 +20,17 @@
 # Google Cloud Vertex AI Configuration (Optional)
 # To use Claude via Vertex AI on Google Cloud Platform, uncomment and set these variables.
 # Requires: gcloud CLI installed and authenticated (run: gcloud auth application-default login)
-# Note: Use @ instead of - in model names (e.g., claude-opus-4-5@20251101)
+# Note: Use @ instead of - in model names for date-suffixed models (e.g., claude-sonnet-4-5@20250929)
 #
 # CLAUDE_CODE_USE_VERTEX=1
 # CLOUD_ML_REGION=us-east5
 # ANTHROPIC_VERTEX_PROJECT_ID=your-gcp-project-id
-# ANTHROPIC_DEFAULT_OPUS_MODEL=claude-opus-4-5@20251101
+# ANTHROPIC_DEFAULT_OPUS_MODEL=claude-opus-4-6
 # ANTHROPIC_DEFAULT_SONNET_MODEL=claude-sonnet-4-5@20250929
 # ANTHROPIC_DEFAULT_HAIKU_MODEL=claude-3-5-haiku@20241022

 # ===================
-# Alternative API Providers
+# Alternative API Providers (GLM, Ollama, Kimi, Custom)
 # ===================
-# NOTE: These env vars are the legacy way to configure providers.
-# The recommended way is to use the Settings UI (API Provider section).
-# UI settings take precedence when api_provider != "claude".
-
-# Kimi K2.5 (Moonshot) Configuration (Optional)
-# Get an API key at: https://kimi.com
-#
-# ANTHROPIC_BASE_URL=https://api.kimi.com/coding/
-# ANTHROPIC_API_KEY=your-kimi-api-key
-# ANTHROPIC_DEFAULT_SONNET_MODEL=kimi-k2.5
-# ANTHROPIC_DEFAULT_OPUS_MODEL=kimi-k2.5
-# ANTHROPIC_DEFAULT_HAIKU_MODEL=kimi-k2.5
-
-# GLM/Alternative API Configuration (Optional)
-# To use Zhipu AI's GLM models instead of Claude, uncomment and set these variables.
-# This only affects AutoForge - your global Claude Code settings remain unchanged.
-# Get an API key at: https://z.ai/subscribe
-#
-# ANTHROPIC_BASE_URL=https://api.z.ai/api/anthropic
-# ANTHROPIC_AUTH_TOKEN=your-zhipu-api-key
-# API_TIMEOUT_MS=3000000
-# ANTHROPIC_DEFAULT_SONNET_MODEL=glm-4.7
-# ANTHROPIC_DEFAULT_OPUS_MODEL=glm-4.7
-# ANTHROPIC_DEFAULT_HAIKU_MODEL=glm-4.5-air
-
-# Ollama Local Model Configuration (Optional)
-# To use local models via Ollama instead of Claude, uncomment and set these variables.
-# Requires Ollama v0.14.0+ with Anthropic API compatibility.
-# See: https://ollama.com/blog/claude
-#
-# ANTHROPIC_BASE_URL=http://localhost:11434
-# ANTHROPIC_AUTH_TOKEN=ollama
-# API_TIMEOUT_MS=3000000
-# ANTHROPIC_DEFAULT_SONNET_MODEL=qwen3-coder
-# ANTHROPIC_DEFAULT_OPUS_MODEL=qwen3-coder
-# ANTHROPIC_DEFAULT_HAIKU_MODEL=qwen3-coder
-#
-# Model recommendations:
-# - For best results, use a capable coding model like qwen3-coder or deepseek-coder-v2
-# - You can use the same model for all tiers, or different models per tier
-# - Larger models (70B+) work best for Opus tier, smaller (7B-20B) for Haiku
+# Configure alternative providers via the Settings UI (gear icon > API Provider).
+# The Settings UI is the recommended way to switch providers and models.
CLAUDE.md (37 changed lines)

@@ -408,44 +408,23 @@ Run coding agents via Google Cloud Vertex AI:
 CLAUDE_CODE_USE_VERTEX=1
 CLOUD_ML_REGION=us-east5
 ANTHROPIC_VERTEX_PROJECT_ID=your-gcp-project-id
-ANTHROPIC_DEFAULT_OPUS_MODEL=claude-opus-4-5@20251101
+ANTHROPIC_DEFAULT_OPUS_MODEL=claude-opus-4-6
 ANTHROPIC_DEFAULT_SONNET_MODEL=claude-sonnet-4-5@20250929
 ANTHROPIC_DEFAULT_HAIKU_MODEL=claude-3-5-haiku@20241022
 ```

 **Note:** Use `@` instead of `-` in model names for Vertex AI.

-### Ollama Local Models (Optional)
+### Alternative API Providers (GLM, Ollama, Kimi, Custom)

-Run coding agents using local models via Ollama v0.14.0+:
+Alternative providers are configured via the **Settings UI** (gear icon > API Provider section). Select a provider, set the base URL, auth token, and model — no `.env` changes needed.

-1. Install Ollama: https://ollama.com
-2. Start Ollama: `ollama serve`
-3. Pull a coding model: `ollama pull qwen3-coder`
-4. Configure `.env`:
-```
-ANTHROPIC_BASE_URL=http://localhost:11434
-ANTHROPIC_AUTH_TOKEN=ollama
-API_TIMEOUT_MS=3000000
-ANTHROPIC_DEFAULT_SONNET_MODEL=qwen3-coder
-ANTHROPIC_DEFAULT_OPUS_MODEL=qwen3-coder
-ANTHROPIC_DEFAULT_HAIKU_MODEL=qwen3-coder
-```
-5. Run AutoForge normally - it will use your local Ollama models
+**Available providers:** Claude (default), GLM (Zhipu AI), Ollama (local models), Kimi (Moonshot), Custom

-**Recommended coding models:**
-- `qwen3-coder` - Good balance of speed and capability
-- `deepseek-coder-v2` - Strong coding performance
-- `codellama` - Meta's code-focused model
-
-**Model tier mapping:**
-- Use the same model for all tiers, or map different models per capability level
-- Larger models (70B+) work best for Opus tier
-- Smaller models (7B-20B) work well for Haiku tier
-
-**Known limitations:**
-- Smaller context windows than Claude (model-dependent)
-- Extended context beta disabled (not supported by Ollama)
+**Ollama notes:**
+- Requires Ollama v0.14.0+ with Anthropic API compatibility
+- Install: https://ollama.com → `ollama serve` → `ollama pull qwen3-coder`
+- Recommended models: `qwen3-coder`, `deepseek-coder-v2`, `codellama`
 - Performance depends on local hardware (GPU recommended)

 ## Claude Code Integration
README.md (34 changed lines)

@@ -326,37 +326,13 @@ When test progress increases, the agent sends:
 }
 ```

-### Using GLM Models (Alternative to Claude)
+### Alternative API Providers (GLM, Ollama, Kimi, Custom)

-Add these variables to your `.env` file to use Zhipu AI's GLM models:
+Alternative providers are configured via the **Settings UI** (gear icon > API Provider). Select your provider, set the base URL, auth token, and model directly in the UI — no `.env` changes needed.

-```bash
-ANTHROPIC_BASE_URL=https://api.z.ai/api/anthropic
-ANTHROPIC_AUTH_TOKEN=your-zhipu-api-key
-API_TIMEOUT_MS=3000000
-ANTHROPIC_DEFAULT_SONNET_MODEL=glm-4.7
-ANTHROPIC_DEFAULT_OPUS_MODEL=glm-4.7
-ANTHROPIC_DEFAULT_HAIKU_MODEL=glm-4.5-air
-```
+Available providers: **Claude** (default), **GLM** (Zhipu AI), **Ollama** (local models), **Kimi** (Moonshot), **Custom**

-This routes AutoForge's API requests through Zhipu's Claude-compatible API, allowing you to use GLM-4.7 and other models. **This only affects AutoForge** - your global Claude Code settings remain unchanged.
-
-Get an API key at: https://z.ai/subscribe
-
-### Using Ollama Local Models
-
-Add these variables to your `.env` file to run agents with local models via Ollama v0.14.0+:
-
-```bash
-ANTHROPIC_BASE_URL=http://localhost:11434
-ANTHROPIC_AUTH_TOKEN=ollama
-API_TIMEOUT_MS=3000000
-ANTHROPIC_DEFAULT_SONNET_MODEL=qwen3-coder
-ANTHROPIC_DEFAULT_OPUS_MODEL=qwen3-coder
-ANTHROPIC_DEFAULT_HAIKU_MODEL=qwen3-coder
-```
-
-See the [CLAUDE.md](CLAUDE.md) for recommended models and known limitations.
+For Ollama, install [Ollama v0.14.0+](https://ollama.com), run `ollama serve`, and pull a coding model (e.g., `ollama pull qwen3-coder`). Then select "Ollama" in the Settings UI.

 ### Using Vertex AI

@@ -366,7 +342,7 @@ Add these variables to your `.env` file to run agents via Google Cloud Vertex AI
 CLAUDE_CODE_USE_VERTEX=1
 CLOUD_ML_REGION=us-east5
 ANTHROPIC_VERTEX_PROJECT_ID=your-gcp-project-id
-ANTHROPIC_DEFAULT_OPUS_MODEL=claude-opus-4-5@20251101
+ANTHROPIC_DEFAULT_OPUS_MODEL=claude-opus-4-6
 ANTHROPIC_DEFAULT_SONNET_MODEL=claude-sonnet-4-5@20250929
 ANTHROPIC_DEFAULT_HAIKU_MODEL=claude-3-5-haiku@20241022
 ```
@@ -46,8 +46,9 @@ def convert_model_for_vertex(model: str) -> str:
     """
     Convert model name format for Vertex AI compatibility.

-    Vertex AI uses @ to separate model name from version (e.g., claude-opus-4-5@20251101)
-    while the Anthropic API uses - (e.g., claude-opus-4-5-20251101).
+    Vertex AI uses @ to separate model name from version (e.g., claude-sonnet-4-5@20250929)
+    while the Anthropic API uses - (e.g., claude-sonnet-4-5-20250929).
+    Models without a date suffix (e.g., claude-opus-4-6) pass through unchanged.

     Args:
         model: Model name in Anthropic format (with hyphens)

@@ -61,7 +62,7 @@ def convert_model_for_vertex(model: str) -> str:
         return model

     # Pattern: claude-{name}-{version}-{date} -> claude-{name}-{version}@{date}
-    # Example: claude-opus-4-5-20251101 -> claude-opus-4-5@20251101
+    # Example: claude-sonnet-4-5-20250929 -> claude-sonnet-4-5@20250929
     # The date is always 8 digits at the end
     match = re.match(r'^(claude-.+)-(\d{8})$', model)
     if match:
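For reference, a standalone sketch of the conversion rule documented in the hunk above, assuming only the regex and behavior shown there (the real function also checks the CLAUDE_CODE_USE_VERTEX gate first, which this sketch omits):

```python
import re

def to_vertex_model_name(model: str) -> str:
    """Sketch of the documented rule: a trailing 8-digit date becomes @date; otherwise pass through."""
    match = re.match(r'^(claude-.+)-(\d{8})$', model)
    if match:
        return f"{match.group(1)}@{match.group(2)}"
    return model

assert to_vertex_model_name("claude-sonnet-4-5-20250929") == "claude-sonnet-4-5@20250929"
assert to_vertex_model_name("claude-opus-4-6") == "claude-opus-4-6"  # no date suffix: unchanged
```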
registry.py (42 changed lines)

@@ -46,10 +46,16 @@ def _migrate_registry_dir() -> None:
 # Available models with display names
 # To add a new model: add an entry here with {"id": "model-id", "name": "Display Name"}
 AVAILABLE_MODELS = [
-    {"id": "claude-opus-4-5-20251101", "name": "Claude Opus 4.5"},
-    {"id": "claude-sonnet-4-5-20250929", "name": "Claude Sonnet 4.5"},
+    {"id": "claude-opus-4-6", "name": "Claude Opus"},
+    {"id": "claude-sonnet-4-5-20250929", "name": "Claude Sonnet"},
 ]

+# Map legacy model IDs to their current replacements.
+# Used by get_all_settings() to auto-migrate stale values on first read after upgrade.
+LEGACY_MODEL_MAP = {
+    "claude-opus-4-5-20251101": "claude-opus-4-6",
+}
+
 # List of valid model IDs (derived from AVAILABLE_MODELS)
 VALID_MODELS = [m["id"] for m in AVAILABLE_MODELS]

@@ -59,7 +65,7 @@ VALID_MODELS = [m["id"] for m in AVAILABLE_MODELS]
 _env_default_model = os.getenv("ANTHROPIC_DEFAULT_OPUS_MODEL")
 if _env_default_model is not None:
     _env_default_model = _env_default_model.strip()
-DEFAULT_MODEL = _env_default_model or "claude-opus-4-5-20251101"
+DEFAULT_MODEL = _env_default_model or "claude-opus-4-6"

 # Ensure env-provided DEFAULT_MODEL is in VALID_MODELS for validation consistency
 # (idempotent: only adds if missing, doesn't alter AVAILABLE_MODELS semantics)

@@ -598,6 +604,9 @@ def get_all_settings() -> dict[str, str]:
     """
     Get all settings as a dictionary.

+    Automatically migrates legacy model IDs (e.g. claude-opus-4-5-20251101 -> claude-opus-4-6)
+    on first read after upgrade. This is a one-time silent migration.
+
     Returns:
         Dictionary mapping setting keys to values.
     """

@@ -606,7 +615,26 @@ def get_all_settings() -> dict[str, str]:
         session = SessionLocal()
         try:
             settings = session.query(Settings).all()
-            return {s.key: s.value for s in settings}
+            result = {s.key: s.value for s in settings}
+
+            # Auto-migrate legacy model IDs
+            migrated = False
+            for key in ("model", "api_model"):
+                old_id = result.get(key)
+                if old_id and old_id in LEGACY_MODEL_MAP:
+                    new_id = LEGACY_MODEL_MAP[old_id]
+                    setting = session.query(Settings).filter(Settings.key == key).first()
+                    if setting:
+                        setting.value = new_id
+                        setting.updated_at = datetime.now()
+                        result[key] = new_id
+                        migrated = True
+                        logger.info("Migrated setting '%s': %s -> %s", key, old_id, new_id)
+
+            if migrated:
+                session.commit()
+
+            return result
         finally:
             session.close()
     except Exception as e:

@@ -624,10 +652,10 @@ API_PROVIDERS: dict[str, dict[str, Any]] = {
         "base_url": None,
         "requires_auth": False,
         "models": [
-            {"id": "claude-opus-4-5-20251101", "name": "Claude Opus 4.5"},
-            {"id": "claude-sonnet-4-5-20250929", "name": "Claude Sonnet 4.5"},
+            {"id": "claude-opus-4-6", "name": "Claude Opus"},
+            {"id": "claude-sonnet-4-5-20250929", "name": "Claude Sonnet"},
         ],
-        "default_model": "claude-opus-4-5-20251101",
+        "default_model": "claude-opus-4-6",
     },
     "kimi": {
         "name": "Kimi K2.5 (Moonshot)",
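The auto-migration added to get_all_settings() above amounts to a map lookup over the stored model keys. A minimal sketch of its effect on a settings dict, with the database write omitted:

```python
LEGACY_MODEL_MAP = {"claude-opus-4-5-20251101": "claude-opus-4-6"}

def migrate_model_settings(stored: dict[str, str]) -> dict[str, str]:
    # Mirror of the loop in get_all_settings(): rewrite legacy IDs in place.
    migrated = dict(stored)
    for key in ("model", "api_model"):
        old_id = migrated.get(key)
        if old_id in LEGACY_MODEL_MAP:
            migrated[key] = LEGACY_MODEL_MAP[old_id]
    return migrated

# A registry saved before the upgrade is silently rewritten on first read.
assert migrate_model_settings({"model": "claude-opus-4-5-20251101"}) == {"model": "claude-opus-4-6"}
```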
@@ -7,7 +7,6 @@ Settings are stored in the registry database and shared across all projects.
 """

 import mimetypes
-import os
 import sys

 from fastapi import APIRouter

@@ -39,19 +38,6 @@ def _parse_yolo_mode(value: str | None) -> bool:
     return (value or "false").lower() == "true"


-def _is_glm_mode() -> bool:
-    """Check if GLM API is configured via environment variables."""
-    base_url = os.getenv("ANTHROPIC_BASE_URL", "")
-    # GLM mode is when ANTHROPIC_BASE_URL is set but NOT pointing to Ollama
-    return bool(base_url) and not _is_ollama_mode()
-
-
-def _is_ollama_mode() -> bool:
-    """Check if Ollama API is configured via environment variables."""
-    base_url = os.getenv("ANTHROPIC_BASE_URL", "")
-    return "localhost:11434" in base_url or "127.0.0.1:11434" in base_url
-
-
 @router.get("/providers", response_model=ProvidersResponse)
 async def get_available_providers():
     """Get list of available API providers."""

@@ -116,9 +102,8 @@ async def get_settings():

     api_provider = all_settings.get("api_provider", "claude")

-    # Compute glm_mode / ollama_mode from api_provider for backward compat
-    glm_mode = api_provider == "glm" or _is_glm_mode()
-    ollama_mode = api_provider == "ollama" or _is_ollama_mode()
+    glm_mode = api_provider == "glm"
+    ollama_mode = api_provider == "ollama"

     return SettingsResponse(
         yolo_mode=_parse_yolo_mode(all_settings.get("yolo_mode")),

@@ -181,8 +166,8 @@ async def update_settings(update: SettingsUpdate):
     # Return updated settings
     all_settings = get_all_settings()
     api_provider = all_settings.get("api_provider", "claude")
-    glm_mode = api_provider == "glm" or _is_glm_mode()
-    ollama_mode = api_provider == "ollama" or _is_ollama_mode()
+    glm_mode = api_provider == "glm"
+    ollama_mode = api_provider == "ollama"

     return SettingsResponse(
         yolo_mode=_parse_yolo_mode(all_settings.get("yolo_mode")),

@@ -411,8 +411,8 @@ class SettingsResponse(BaseModel):
     """Response schema for global settings."""
     yolo_mode: bool = False
     model: str = DEFAULT_MODEL
-    glm_mode: bool = False  # True if GLM API is configured via .env
-    ollama_mode: bool = False  # True if Ollama API is configured via .env
+    glm_mode: bool = False  # True when api_provider is "glm"
+    ollama_mode: bool = False  # True when api_provider is "ollama"
     testing_agent_ratio: int = 1  # Regression testing agents (0-3)
     playwright_headless: bool = True
     batch_size: int = 3  # Features per coding agent batch (1-3)
@@ -157,7 +157,7 @@ class AssistantChatSession:
     """
     Manages a read-only assistant conversation for a project.

-    Uses Claude Opus 4.5 with only read-only tools enabled.
+    Uses Claude Opus with only read-only tools enabled.
     Persists conversation history to SQLite.
     """

@@ -258,11 +258,11 @@ class AssistantChatSession:
         system_cli = shutil.which("claude")

         # Build environment overrides for API configuration
-        from registry import get_effective_sdk_env
+        from registry import DEFAULT_MODEL, get_effective_sdk_env
         sdk_env = get_effective_sdk_env()

         # Determine model from SDK env (provider-aware) or fallback to env/default
-        model = sdk_env.get("ANTHROPIC_DEFAULT_OPUS_MODEL") or os.getenv("ANTHROPIC_DEFAULT_OPUS_MODEL", "claude-opus-4-5-20251101")
+        model = sdk_env.get("ANTHROPIC_DEFAULT_OPUS_MODEL") or os.getenv("ANTHROPIC_DEFAULT_OPUS_MODEL", DEFAULT_MODEL)

         try:
             logger.info("Creating ClaudeSDKClient...")
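Each chat session resolves its model with the same updated fallback chain, here and in the hunks below. A minimal sketch of the precedence, using only the names shown in these hunks:

```python
import os

def resolve_opus_model(sdk_env: dict[str, str], default_model: str) -> str:
    # Precedence: provider-aware SDK env, then the process environment,
    # then the registry DEFAULT_MODEL (claude-opus-4-6 after this commit).
    return (
        sdk_env.get("ANTHROPIC_DEFAULT_OPUS_MODEL")
        or os.getenv("ANTHROPIC_DEFAULT_OPUS_MODEL", default_model)
    )

# With no overrides anywhere, the registry default wins.
assert resolve_opus_model({}, "claude-opus-4-6") in ("claude-opus-4-6", os.getenv("ANTHROPIC_DEFAULT_OPUS_MODEL", "claude-opus-4-6"))
```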
@@ -154,11 +154,11 @@ class ExpandChatSession:
         system_prompt = skill_content.replace("$ARGUMENTS", project_path)

         # Build environment overrides for API configuration
-        from registry import get_effective_sdk_env
+        from registry import DEFAULT_MODEL, get_effective_sdk_env
         sdk_env = get_effective_sdk_env()

         # Determine model from SDK env (provider-aware) or fallback to env/default
-        model = sdk_env.get("ANTHROPIC_DEFAULT_OPUS_MODEL") or os.getenv("ANTHROPIC_DEFAULT_OPUS_MODEL", "claude-opus-4-5-20251101")
+        model = sdk_env.get("ANTHROPIC_DEFAULT_OPUS_MODEL") or os.getenv("ANTHROPIC_DEFAULT_OPUS_MODEL", DEFAULT_MODEL)

         # Build MCP servers config for feature creation
         mcp_servers = {

@@ -346,7 +346,7 @@ class AgentProcessManager:

         Args:
             yolo_mode: If True, run in YOLO mode (skip testing agents)
-            model: Model to use (e.g., claude-opus-4-5-20251101)
+            model: Model to use (e.g., claude-opus-4-6)
             parallel_mode: DEPRECATED - ignored, always uses unified orchestrator
             max_concurrency: Max concurrent coding agents (1-5, default 1)
             testing_agent_ratio: Number of regression testing agents (0-3, default 1)

@@ -140,11 +140,11 @@ class SpecChatSession:
         system_cli = shutil.which("claude")

         # Build environment overrides for API configuration
-        from registry import get_effective_sdk_env
+        from registry import DEFAULT_MODEL, get_effective_sdk_env
         sdk_env = get_effective_sdk_env()

         # Determine model from SDK env (provider-aware) or fallback to env/default
-        model = sdk_env.get("ANTHROPIC_DEFAULT_OPUS_MODEL") or os.getenv("ANTHROPIC_DEFAULT_OPUS_MODEL", "claude-opus-4-5-20251101")
+        model = sdk_env.get("ANTHROPIC_DEFAULT_OPUS_MODEL") or os.getenv("ANTHROPIC_DEFAULT_OPUS_MODEL", DEFAULT_MODEL)

         try:
             self.client = ClaudeSDKClient(
@@ -40,15 +40,15 @@ class TestConvertModelForVertex(unittest.TestCase):
     def test_returns_model_unchanged_when_vertex_disabled(self):
         os.environ.pop("CLAUDE_CODE_USE_VERTEX", None)
         self.assertEqual(
-            convert_model_for_vertex("claude-opus-4-5-20251101"),
-            "claude-opus-4-5-20251101",
+            convert_model_for_vertex("claude-opus-4-6"),
+            "claude-opus-4-6",
         )

     def test_returns_model_unchanged_when_vertex_set_to_zero(self):
         os.environ["CLAUDE_CODE_USE_VERTEX"] = "0"
         self.assertEqual(
-            convert_model_for_vertex("claude-opus-4-5-20251101"),
-            "claude-opus-4-5-20251101",
+            convert_model_for_vertex("claude-opus-4-6"),
+            "claude-opus-4-6",
         )

     def test_returns_model_unchanged_when_vertex_set_to_empty(self):

@@ -60,13 +60,20 @@ class TestConvertModelForVertex(unittest.TestCase):

     # --- Vertex AI enabled: standard conversions ---

-    def test_converts_opus_model(self):
+    def test_converts_legacy_opus_model(self):
         os.environ["CLAUDE_CODE_USE_VERTEX"] = "1"
         self.assertEqual(
             convert_model_for_vertex("claude-opus-4-5-20251101"),
             "claude-opus-4-5@20251101",
         )

+    def test_opus_4_6_passthrough_on_vertex(self):
+        os.environ["CLAUDE_CODE_USE_VERTEX"] = "1"
+        self.assertEqual(
+            convert_model_for_vertex("claude-opus-4-6"),
+            "claude-opus-4-6",
+        )
+
     def test_converts_sonnet_model(self):
         os.environ["CLAUDE_CODE_USE_VERTEX"] = "1"
         self.assertEqual(

@@ -86,8 +93,8 @@ class TestConvertModelForVertex(unittest.TestCase):
     def test_already_vertex_format_unchanged(self):
         os.environ["CLAUDE_CODE_USE_VERTEX"] = "1"
         self.assertEqual(
-            convert_model_for_vertex("claude-opus-4-5@20251101"),
-            "claude-opus-4-5@20251101",
+            convert_model_for_vertex("claude-sonnet-4-5@20250929"),
+            "claude-sonnet-4-5@20250929",
         )

     def test_non_claude_model_unchanged(self):

@@ -100,8 +107,8 @@ class TestConvertModelForVertex(unittest.TestCase):
     def test_model_without_date_suffix_unchanged(self):
         os.environ["CLAUDE_CODE_USE_VERTEX"] = "1"
         self.assertEqual(
-            convert_model_for_vertex("claude-opus-4-5"),
-            "claude-opus-4-5",
+            convert_model_for_vertex("claude-opus-4-6"),
+            "claude-opus-4-6",
         )

     def test_empty_string_unchanged(self):
@@ -319,7 +319,7 @@ function App() {
           {settings?.ollama_mode && (
             <div
               className="flex items-center gap-1.5 px-2 py-1 bg-card rounded border-2 border-border shadow-sm"
-              title="Using Ollama local models (configured via .env)"
+              title="Using Ollama local models"
             >
               <img src="/ollama.png" alt="Ollama" className="w-5 h-5" />
               <span className="text-xs font-bold text-foreground">Ollama</span>

@@ -330,7 +330,7 @@ function App() {
           {settings?.glm_mode && (
             <Badge
               className="bg-purple-500 text-white hover:bg-purple-600"
-              title="Using GLM API (configured via .env)"
+              title="Using GLM API"
             >
               GLM
             </Badge>

@@ -325,7 +325,8 @@ export function SettingsModal({ isOpen, onClose }: SettingsModalProps) {
                   : 'bg-background text-foreground hover:bg-muted'
               } ${isSaving ? 'opacity-50 cursor-not-allowed' : ''}`}
             >
-              {model.name}
+              <span className="block">{model.name}</span>
+              <span className="block text-xs opacity-60">{model.id}</span>
             </button>
           ))}
         </div>

@@ -254,15 +254,15 @@ export function useValidatePath() {
 // Default models response for placeholder (until API responds)
 const DEFAULT_MODELS: ModelsResponse = {
   models: [
-    { id: 'claude-opus-4-5-20251101', name: 'Claude Opus 4.5' },
-    { id: 'claude-sonnet-4-5-20250929', name: 'Claude Sonnet 4.5' },
+    { id: 'claude-opus-4-6', name: 'Claude Opus' },
+    { id: 'claude-sonnet-4-5-20250929', name: 'Claude Sonnet' },
   ],
-  default: 'claude-opus-4-5-20251101',
+  default: 'claude-opus-4-6',
 }

 const DEFAULT_SETTINGS: Settings = {
   yolo_mode: false,
-  model: 'claude-opus-4-5-20251101',
+  model: 'claude-opus-4-6',
   glm_mode: false,
   ollama_mode: false,
   testing_agent_ratio: 1,

@@ -276,7 +276,7 @@ const DEFAULT_SETTINGS: Settings = {

 const DEFAULT_PROVIDERS: ProvidersResponse = {
   providers: [
-    { id: 'claude', name: 'Claude (Anthropic)', base_url: null, models: DEFAULT_MODELS.models, default_model: 'claude-opus-4-5-20251101', requires_auth: false },
+    { id: 'claude', name: 'Claude (Anthropic)', base_url: null, models: DEFAULT_MODELS.models, default_model: 'claude-opus-4-6', requires_auth: false },
   ],
   current: 'claude',
 }