- Added support for multiple AI providers (Ollama, Gemini, OpenRouter, Copilot) with provider abstraction layer - Created settings view with provider configuration and API key management - Updated UI to show current provider status and handle provider-specific availability - Modified reasoning mode to work exclusively with Ollama provider - Added provider switching functionality with persistent settings - Updated error messages and placeholders to be provider-specific
170 lines
6.0 KiB
Python
170 lines
6.0 KiB
Python
"""Reasoning mode controller for managing AI reasoning preferences."""
|
|
|
|
from __future__ import annotations

import json
import os
from dataclasses import asdict, dataclass, field
from pathlib import Path
from typing import Any, Dict
|
|
|
|
|
|
@dataclass
class PreferencesState:
    """User preferences for sidebar behavior.

    Instances are serialized to/from JSON by ReasoningController, so every
    field must be JSON-representable.
    """

    # Whether reasoning (thinking) mode is active.
    reasoning_enabled: bool = False
    # Preferred model name, or None to use the controller's default.
    default_model: str | None = None
    # UI theme identifier.
    theme: str = "default"
    # AI provider: "ollama", "gemini", "openrouter", "copilot".
    provider: str = "ollama"
    # API keys for providers (gemini, openrouter), keyed by provider name.
    # default_factory gives each instance its own dict — the original declared
    # `= None` (contradicting the Dict annotation) and relied on __post_init__
    # to repair it.
    api_keys: Dict[str, str] = field(default_factory=dict)
    # GitHub Copilot OAuth token.
    copilot_oauth_token: str | None = None

    def __post_init__(self) -> None:
        """Normalize api_keys: kept so callers passing api_keys=None still work."""
        if self.api_keys is None:
            self.api_keys = {}
|
|
|
|
|
|
class ReasoningController:
    """Manages reasoning mode state, model selection, and provider settings.

    Preferences live in ``~/.config/aisidebar/preferences.json`` and are
    written atomically (temp file + ``os.replace``) so a crash mid-save
    cannot corrupt the file.
    """

    # Ollama model names toggled by reasoning mode.
    INSTRUCT_MODEL = "hf.co/unsloth/Qwen3-4B-Instruct-2507-GGUF:Q8_K_XL"
    THINKING_MODEL = "hf.co/unsloth/Qwen3-4B-Thinking-2507-GGUF:Q8_K_XL"

    def __init__(self):
        """Initialize the reasoning controller with preference persistence.

        Creates the config directory if needed and loads any persisted
        preferences from disk.
        """
        config_dir = Path.home() / ".config" / "aisidebar"
        config_dir.mkdir(parents=True, exist_ok=True)
        self._preference_file = config_dir / "preferences.json"
        self._preferences = self._load_preferences()

    def is_enabled(self) -> bool:
        """Check if reasoning mode is active.

        Returns:
            True if reasoning mode is enabled, False otherwise
        """
        return self._preferences.reasoning_enabled

    def toggle(self) -> bool:
        """Toggle reasoning mode and persist the preference.

        Returns:
            New state of reasoning mode (True if enabled, False if disabled)
        """
        self._preferences.reasoning_enabled = not self._preferences.reasoning_enabled
        self._save_preferences()
        return self._preferences.reasoning_enabled

    def get_model_name(self) -> str:
        """Return the appropriate model name based on reasoning mode.

        Returns:
            THINKING_MODEL if reasoning is enabled, INSTRUCT_MODEL otherwise
        """
        if self._preferences.reasoning_enabled:
            return self.THINKING_MODEL
        return self.INSTRUCT_MODEL

    def get_model_options(self) -> Dict[str, Any]:
        """Return the optimal sampling parameters for the current model.

        Returns:
            Dictionary of model-specific parameters; empty for non-Ollama
            providers, which do not accept these options.
        """
        if self._preferences.provider != "ollama":
            return {}

        if self._preferences.reasoning_enabled:
            # Thinking model settings
            return {
                "temperature": 0.6,
                "top_p": 0.95,
                "top_k": 20,
                "min_p": 0.0,
                "num_predict": 32768,  # Adequate output length
            }
        # Instruct model settings
        return {
            "temperature": 0.7,
            "top_p": 0.8,
            "top_k": 20,
            "min_p": 0.0,
            "num_predict": 32768,
        }

    def get_provider(self) -> str:
        """Get the current AI provider name (e.g. "ollama", "gemini")."""
        return self._preferences.provider

    def set_provider(self, provider: str) -> None:
        """Set the AI provider and persist the choice."""
        self._preferences.provider = provider
        self._save_preferences()

    def get_api_key(self, provider: str) -> str | None:
        """Get the stored API key for a provider, or None if unset."""
        return self._preferences.api_keys.get(provider)

    def set_api_key(self, provider: str, api_key: str) -> None:
        """Store an API key for a provider and persist it."""
        self._preferences.api_keys[provider] = api_key
        self._save_preferences()

    def get_copilot_token(self) -> str | None:
        """Get the GitHub Copilot OAuth token, or None if unset."""
        return self._preferences.copilot_oauth_token

    def set_copilot_token(self, token: str | None) -> None:
        """Set (or clear, with None) the GitHub Copilot OAuth token."""
        self._preferences.copilot_oauth_token = token
        self._save_preferences()

    def _load_preferences(self) -> PreferencesState:
        """Load preferences from disk or create defaults.

        Returns:
            PreferencesState instance with loaded or default values.
            A missing, corrupted, or unreadable file yields defaults.
        """
        if not self._preference_file.exists():
            return PreferencesState()

        try:
            with self._preference_file.open("r", encoding="utf-8") as f:
                data = json.load(f)
            # Pull fields individually so unknown keys in the file are
            # ignored and missing keys fall back to defaults.
            return PreferencesState(
                reasoning_enabled=data.get("reasoning_enabled", False),
                default_model=data.get("default_model"),
                theme=data.get("theme", "default"),
                provider=data.get("provider", "ollama"),
                api_keys=data.get("api_keys", {}),
                copilot_oauth_token=data.get("copilot_oauth_token"),
            )
        except (json.JSONDecodeError, OSError):
            # If file is corrupted or unreadable, return defaults
            return PreferencesState()

    def _save_preferences(self) -> None:
        """Persist preferences to disk atomically; failures are non-fatal."""
        temp_file = self._preference_file.with_suffix(".tmp")
        try:
            data = asdict(self._preferences)

            # Write to a temporary file first so a crash never leaves a
            # half-written preferences file.
            with temp_file.open("w", encoding="utf-8") as f:
                json.dump(data, f, indent=2, ensure_ascii=False)
                f.flush()
                os.fsync(f.fileno())  # ensure bytes reach disk before rename

            # Atomic replace
            os.replace(temp_file, self._preference_file)
        except OSError:
            # Best-effort persistence: keep running with in-memory state;
            # disk reverts to the previous state on next load. Fix over the
            # original: clean up the temp file instead of leaking it when
            # fsync/replace fails.
            try:
                temp_file.unlink(missing_ok=True)
            except OSError:
                pass
|