feat: add multi-provider support to chat widget

- Added support for multiple AI providers (Ollama, Gemini, OpenRouter, Copilot) with provider abstraction layer
- Created settings view with provider configuration and API key management
- Updated UI to show current provider status and handle provider-specific availability
- Modified reasoning mode to work exclusively with Ollama provider
- Added provider switching functionality with persistent settings
- Updated error messages and placeholders to be provider-aware
This commit is contained in:
Melvin Ragusa
2025-10-31 00:08:04 +01:00
parent 55aad289bc
commit 6cc11fc9e4
4 changed files with 1396 additions and 57 deletions

View File

@@ -9,6 +9,14 @@ from .ollama_monitor import OllamaAvailabilityMonitor
from .streaming_handler import StreamingHandler
from .command_processor import CommandProcessor, CommandResult
from .reasoning_controller import ReasoningController
from .provider_client import (
AIProvider,
OllamaProvider,
GeminiProvider,
OpenRouterProvider,
CopilotProvider,
)
from .settings_widget import SettingsWidget
class ChatWidget(widgets.Box):
@@ -19,23 +27,30 @@ class ChatWidget(widgets.Box):
self._load_css()
self._conversation_manager = ConversationManager()
self._conversation_archive = ConversationArchive()
self._ollama_client = OllamaClient()
self._current_model = self._ollama_client.default_model
# Initialize availability monitor
self._ollama_monitor = OllamaAvailabilityMonitor(self._ollama_client)
self._ollama_monitor.add_callback(self._on_ollama_availability_changed)
# Initialize command processor
self._command_processor = CommandProcessor()
self._register_commands()
# Initialize reasoning controller
self._reasoning_controller = ReasoningController()
# Set initial model based on reasoning preference
if self._ollama_client.is_available:
self._current_model = self._reasoning_controller.get_model_name()
# Initialize provider abstraction
self._current_provider: AIProvider | None = None
self._current_model: str | None = None
self._provider_instances: dict[str, AIProvider] = {}
self._initialize_provider()
# Initialize availability monitor (only for Ollama)
ollama_provider = self._get_provider("ollama")
if isinstance(ollama_provider, OllamaProvider):
self._ollama_client = ollama_provider._client
self._ollama_monitor = OllamaAvailabilityMonitor(self._ollama_client)
self._ollama_monitor.add_callback(self._on_ollama_availability_changed)
self._ollama_monitor.start()
else:
self._ollama_client = None
self._ollama_monitor = None
# Initialize command processor
self._command_processor = CommandProcessor()
self._register_commands()
# Header with title and model
header_title = widgets.Label(
@@ -44,17 +59,13 @@ class ChatWidget(widgets.Box):
css_classes=["title-2"],
)
# Display connection status if Ollama unavailable at startup
if not self._ollama_client.is_available:
model_name = "Ollama not running"
else:
model_name = self._current_model or "No local model detected"
# Display provider and model status
self._model_label = widgets.Label(
label=f"Model: {model_name}",
label="",
halign="start",
css_classes=["dim-label"],
)
self._update_model_label()
# Reasoning mode toggle button (using regular button with state tracking)
self._reasoning_enabled = self._reasoning_controller.is_enabled()
@@ -66,11 +77,19 @@ class ChatWidget(widgets.Box):
hexpand=False,
)
# Header top row with title and toggle
# Settings button (gear icon)
settings_button = widgets.Button(
label="⚙️",
on_click=lambda x: self._show_settings(),
halign="end",
hexpand=False,
)
# Header top row with title, settings, and toggle
header_top = widgets.Box(
spacing=8,
hexpand=True,
child=[header_title, self._reasoning_toggle],
child=[header_title, settings_button, self._reasoning_toggle],
)
header_box = widgets.Box(
@@ -109,8 +128,7 @@ class ChatWidget(widgets.Box):
self._text_view.set_size_request(300, 60) # Set explicit width and height
# Set placeholder text
placeholder = "Ask a question…" if self._ollama_client.is_available else "Ollama not running - start with: ollama serve"
self._placeholder_text = placeholder
self._update_placeholder_text()
self._is_placeholder_shown = False
self._updating_placeholder = False
@@ -180,12 +198,46 @@ class ChatWidget(widgets.Box):
# Initialize placeholder display
self._update_placeholder()
# Disable input if Ollama unavailable at startup
if not self._ollama_client.is_available:
# Disable input if provider unavailable at startup
if not self._current_provider or not self._current_provider.is_available:
self._set_input_enabled(False)
# Start monitoring Ollama availability
self._ollama_monitor.start()
# Create settings widget
self._settings_widget = SettingsWidget(
self._reasoning_controller,
on_provider_changed=self._on_provider_changed_from_settings,
on_back=self._show_chat
)
# Create view stack for switching between chat and settings
self._view_stack = Gtk.Stack()
self._view_stack.set_transition_type(Gtk.StackTransitionType.SLIDE_LEFT_RIGHT)
self._view_stack.set_transition_duration(200)
# Add chat view
chat_container = widgets.Box(
vertical=True,
spacing=12,
hexpand=True,
vexpand=True,
child=[header_box, self._scroller, input_box],
css_classes=["ai-sidebar-content"],
)
chat_container.set_margin_top(16)
chat_container.set_margin_bottom(16)
chat_container.set_margin_start(16)
chat_container.set_margin_end(16)
self._view_stack.add_named(chat_container, "chat")
self._view_stack.add_named(self._settings_widget, "settings")
self._view_stack.set_visible_child_name("chat")
# Replace main container with stack
super().__init__(
hexpand=True,
vexpand=True,
child=[self._view_stack],
)
def _auto_archive_old_messages(self, keep_recent: int = 20):
"""Auto-archive old messages on startup, keeping only recent ones.
@@ -462,11 +514,16 @@ class ChatWidget(widgets.Box):
self._append_system_message(result.message)
return
# Check Ollama availability before processing regular messages
if not self._ollama_client.is_available:
# Check provider availability before processing regular messages
if not self._current_provider or not self._current_provider.is_available:
provider_name = self._current_provider.name if self._current_provider else "Provider"
if provider_name == "ollama":
error_msg = "Ollama is not running. Please start Ollama with: ollama serve"
else:
error_msg = f"{provider_name.capitalize()} is not configured. Please check settings."
self._append_message(
"assistant",
"Ollama is not running. Please start Ollama with: ollama serve",
error_msg,
persist=False,
)
return
@@ -477,19 +534,29 @@ class ChatWidget(widgets.Box):
def _request_response(self):
"""Request AI response in background thread with streaming"""
# Double-check availability before making request
if not self._ollama_client.is_available:
if not self._current_provider or not self._current_provider.is_available:
provider_name = self._current_provider.name if self._current_provider else "Provider"
if provider_name == "ollama":
error_msg = "Ollama is not running. Please start Ollama with: ollama serve"
else:
error_msg = f"{provider_name.capitalize()} is not configured. Please check settings."
self._append_message(
"assistant",
"Ollama is not running. Please start Ollama with: ollama serve",
error_msg,
persist=False,
)
return
model = self._current_model or self._ollama_client.default_model
model = self._current_model or self._current_provider.default_model
if not model:
provider_name = self._current_provider.name
if provider_name == "ollama":
error_msg = "No Ollama models are available. Install a model with: ollama pull llama2"
else:
error_msg = f"No {provider_name.capitalize()} models are available. Check settings."
self._append_message(
"assistant",
"No Ollama models are available. Install a model with: ollama pull llama2",
error_msg,
persist=True,
)
return
@@ -597,11 +664,11 @@ class ChatWidget(widgets.Box):
try:
handler.start_stream()
# Get model-specific options
options = self._reasoning_controller.get_model_options()
# Get model-specific options (only for Ollama)
options = self._reasoning_controller.get_model_options() if self._current_provider.name == "ollama" else None
# Stream response tokens
for chunk in self._ollama_client.stream_chat(
for chunk in self._current_provider.stream_chat(
model=model,
messages=list(messages),
options=options
@@ -727,14 +794,117 @@ class ChatWidget(widgets.Box):
"""Focus the input text view"""
self._text_view.grab_focus()
def _initialize_provider(self):
    """Resolve the preferred provider from settings and pick its starting model."""
    provider_id = self._reasoning_controller.get_provider()
    provider = self._get_provider(provider_id)
    self._current_provider = provider
    if provider is None:
        # No usable provider (unknown id or missing credential): leave model unset.
        self._current_model = None
        return
    # The reasoning controller owns Ollama's model choice; every other
    # provider falls back to its own default model.
    if provider_id == "ollama":
        self._current_model = self._reasoning_controller.get_model_name()
    else:
        self._current_model = provider.default_model
def _get_provider(self, provider_id: str) -> AIProvider | None:
    """Return the provider instance for *provider_id*, or None.

    Only the Ollama provider is cached: it needs no credentials, so a
    cached instance never goes stale.  Credential-based providers
    (Gemini, OpenRouter, Copilot) are rebuilt from the currently stored
    key/token on every call — the previous implementation cached them
    unconditionally, so an API key changed later in settings was
    silently ignored in favor of the stale cached one.

    Returns:
        The provider instance, or None for an unknown provider id or
        when the required credential is missing.
    """
    if provider_id == "ollama":
        if "ollama" not in self._provider_instances:
            self._provider_instances["ollama"] = OllamaProvider()
        return self._provider_instances["ollama"]

    if provider_id == "gemini":
        api_key = self._reasoning_controller.get_api_key("gemini")
        provider = GeminiProvider(api_key=api_key) if api_key else None
    elif provider_id == "openrouter":
        api_key = self._reasoning_controller.get_api_key("openrouter")
        provider = OpenRouterProvider(api_key=api_key) if api_key else None
    elif provider_id == "copilot":
        token = self._reasoning_controller.get_copilot_token()
        provider = CopilotProvider(oauth_token=token) if token else None
    else:
        # Unknown provider id.
        return None

    if provider:
        # Remember the most recent instance so other call sites observe
        # the same object until the next (re)build.
        self._provider_instances[provider_id] = provider
    return provider
def _update_model_label(self):
    """Refresh the header label that shows provider and model status."""
    if not hasattr(self, '_model_label'):
        # Called before the label widget exists during construction.
        return
    provider = self._current_provider
    if not provider:
        self._model_label.label = "Provider: Not configured"
        return
    provider_name = provider.name.capitalize()
    if provider.is_available:
        model_name = self._current_model or "No model selected"
        self._model_label.label = f"{provider_name}: {model_name}"
    elif provider_name == "Ollama":
        self._model_label.label = f"{provider_name}: Not running"
    else:
        self._model_label.label = f"{provider_name}: Not configured"
def _update_placeholder_text(self):
    """Recompute the input placeholder from current provider availability."""
    if not hasattr(self, '_text_view'):
        # Input widget not built yet; nothing to update.
        return
    provider = self._current_provider
    if provider and provider.is_available:
        self._placeholder_text = "Ask a question…"
        return
    provider_name = provider.name if provider else "Provider"
    if provider_name == "ollama":
        self._placeholder_text = "Ollama not running - start with: ollama serve"
    else:
        self._placeholder_text = f"{provider_name.capitalize()} not configured. Check settings."
def _show_settings(self):
    """Switch the view stack to the settings page."""
    stack = getattr(self, '_view_stack', None)
    if stack is not None:
        stack.set_visible_child_name("settings")
def _show_chat(self):
    """Switch the view stack back to the chat page."""
    stack = getattr(self, '_view_stack', None)
    if stack is not None:
        stack.set_visible_child_name("chat")
def _on_provider_changed_from_settings(self, provider_id: str):
    """React to the settings view selecting a different provider."""
    provider = self._get_provider(provider_id)
    self._current_provider = provider
    if provider is None:
        self._current_model = None
    elif provider_id == "ollama":
        # Ollama's active model is managed by the reasoning controller.
        self._current_model = self._reasoning_controller.get_model_name()
    else:
        self._current_model = provider.default_model
    # Refresh every piece of UI that reflects provider state.
    self._update_model_label()
    self._update_placeholder_text()
    self._update_placeholder()
    self._set_input_enabled(bool(provider and provider.is_available))
def _on_ollama_availability_changed(self, is_available: bool):
"""Handle Ollama availability state changes"""
# Only handle if Ollama is the current provider
if self._reasoning_controller.get_provider() != "ollama":
return
if is_available:
# Ollama became available - use model from reasoning controller
self._current_model = self._reasoning_controller.get_model_name()
model_name = self._current_model or "No local model detected"
self._model_label.label = f"Model: {model_name}"
self._placeholder_text = "Ask a question…"
self._update_model_label()
self._update_placeholder_text()
self._update_placeholder()
self._set_input_enabled(True)
@@ -746,13 +916,18 @@ class ChatWidget(widgets.Box):
)
else:
# Ollama became unavailable
self._model_label.label = "Model: Ollama not running"
self._placeholder_text = "Ollama not running - start with: ollama serve"
self._update_model_label()
self._update_placeholder_text()
self._update_placeholder()
self._set_input_enabled(False)
def _on_reasoning_toggled(self):
"""Handle reasoning mode toggle button state changes"""
# Only work for Ollama provider
if self._reasoning_controller.get_provider() != "ollama":
self._append_system_message("Reasoning mode is only available for Ollama provider.")
return
# Toggle the reasoning mode
new_state = self._reasoning_controller.toggle()
self._reasoning_enabled = new_state
@@ -761,14 +936,12 @@ class ChatWidget(widgets.Box):
new_model = self._reasoning_controller.get_model_name()
self._current_model = new_model
# Update button label
toggle_label = "🧠 Reasoning: ON" if new_state else "🧠 Reasoning: OFF"
self._reasoning_toggle.label = toggle_label
# Update model label in header
self._model_label.label = f"Model: {new_model}"
self._update_model_label()
# Show feedback message
status = "enabled" if new_state else "disabled"
@@ -807,7 +980,7 @@ class ChatWidget(widgets.Box):
Returns:
Generated title or empty string if generation fails
"""
if not messages or not self._ollama_client.is_available:
if not messages or not self._current_provider or not self._current_provider.is_available:
return ""
# Extract first few user messages for context
@@ -832,14 +1005,14 @@ class ChatWidget(widgets.Box):
]
try:
model = self._current_model or self._ollama_client.default_model
model = self._current_model or self._current_provider.default_model
if not model:
return ""
# Use non-streaming chat for title generation
response = self._ollama_client.chat(model=model, messages=title_prompt)
if response and response.get("message"):
title = response["message"].get("content", "").strip()
response = self._current_provider.chat(model=model, messages=title_prompt)
if response and response.get("content"):
title = response["content"].strip()
# Clean up the title (remove quotes, limit length)
title = title.strip('"\'').strip()
# Limit to 50 characters
@@ -905,13 +1078,14 @@ class ChatWidget(widgets.Box):
Returns:
CommandResult with model list
"""
if not self._ollama_client.is_available:
if not self._current_provider or not self._current_provider.is_available:
provider_name = self._current_provider.name if self._current_provider else "Provider"
return CommandResult(
success=False,
message="Ollama is not running. Start Ollama with: ollama serve"
message=f"{provider_name.capitalize()} is not available. Check settings."
)
models = self._ollama_client.list_models(force_refresh=True)
models = self._current_provider.list_models(force_refresh=True)
if not models:
return CommandResult(
@@ -945,10 +1119,11 @@ class ChatWidget(widgets.Box):
Returns:
CommandResult with success status
"""
if not self._ollama_client.is_available:
if not self._current_provider or not self._current_provider.is_available:
provider_name = self._current_provider.name if self._current_provider else "Provider"
return CommandResult(
success=False,
message="Ollama is not running. Start Ollama with: ollama serve"
message=f"{provider_name.capitalize()} is not available. Check settings."
)
model_name = args.strip()
@@ -960,7 +1135,7 @@ class ChatWidget(widgets.Box):
)
# Validate model exists
available_models = self._ollama_client.list_models(force_refresh=True)
available_models = self._current_provider.list_models(force_refresh=True)
if model_name not in available_models:
return CommandResult(