From 58bd935af02ba02d58793d7bbbb49df1f7d266b7 Mon Sep 17 00:00:00 2001 From: Melvin Ragusa Date: Sat, 25 Oct 2025 22:28:54 +0200 Subject: [PATCH] feat(aisidebar): implement Ollama availability handling and graceful startup - Add comprehensive Ollama connection error handling strategy - Implement OllamaClient with non-blocking initialization and connection checks - Create OllamaAvailabilityMonitor for periodic Ollama connection tracking - Update design and requirements to support graceful Ollama unavailability - Add new project structure for AI sidebar module with initial implementation - Enhance error handling to prevent application crashes when Ollama is not running - Prepare for future improvements in AI sidebar interaction and resilience --- .kiro/specs/ai-sidebar-enhancements/design.md | 51 +++++ .../ai-sidebar-enhancements/requirements.md | 12 ++ .kiro/specs/ai-sidebar-enhancements/tasks.md | 37 +++- aisidebar/README.md | 124 +++++++++++ aisidebar/__init__.py | 3 + aisidebar/aisidebar.py | 73 +++++++ aisidebar/chat_widget.py | 192 ++++++++++++++++++ aisidebar/conversation_manager.py | 173 ++++++++++++++++ aisidebar/data/conversations/default.json | 52 +++++ aisidebar/ollama_client.py | 130 ++++++++++++ ollama_client.py | 59 +++++- 11 files changed, 895 insertions(+), 11 deletions(-) create mode 100644 aisidebar/README.md create mode 100644 aisidebar/__init__.py create mode 100644 aisidebar/aisidebar.py create mode 100644 aisidebar/chat_widget.py create mode 100644 aisidebar/conversation_manager.py create mode 100644 aisidebar/data/conversations/default.json create mode 100644 aisidebar/ollama_client.py diff --git a/.kiro/specs/ai-sidebar-enhancements/design.md b/.kiro/specs/ai-sidebar-enhancements/design.md index 6ad3b8c..79202b9 100644 --- a/.kiro/specs/ai-sidebar-enhancements/design.md +++ b/.kiro/specs/ai-sidebar-enhancements/design.md @@ -329,6 +329,31 @@ class PreferencesState: ## Error Handling +### Ollama Unavailability + +- **Startup Without Ollama**: Initialize all components successfully, show status message in UI +- **Model List Failure**: Return empty list, display "Ollama not running" in model label +- **Chat Request Without Ollama**: Display friendly message: "Please start Ollama to use AI features" +- **Connection Lost Mid-Stream**: Display partial response + reconnection instructions +- **Periodic Availability Check**: Attempt to reconnect every 30s when unavailable (non-blocking) + +#### Implementation Strategy + +```python +class OllamaClient: + def __init__(self, host: str | None = None) -> None: + # Never raise exceptions during initialization + # Set _available = False if connection fails + + def list_models(self) -> list[str]: + # Return empty list instead of raising on connection failure + # Log warning but don't crash + + def chat(self, ...) 
-> dict[str, str] | None: + # Return error message dict instead of raising + # {"role": "assistant", "content": "Ollama unavailable..."} +``` + ### Streaming Errors - **Connection Lost**: Display partial response + error message, allow retry @@ -422,6 +447,32 @@ class PreferencesState: - Preferences file is optional; defaults work without it - Graceful degradation if gtk4-layer-shell unavailable +### Ollama Availability Detection + +Add periodic checking mechanism to detect when Ollama becomes available: + +```python +class OllamaAvailabilityMonitor: + """Monitors Ollama availability and notifies UI of state changes.""" + + def __init__(self, client: OllamaClient, callback: Callable[[bool], None]): + self._client = client + self._callback = callback + self._last_state = False + self._check_interval = 30 # seconds + + def start_monitoring(self) -> None: + """Begin periodic availability checks via GLib.timeout_add.""" + + def _check_availability(self) -> bool: + """Check if Ollama is available and notify on state change.""" +``` + +Integration in SidebarWindow: +- Initialize monitor on startup +- Update UI state when availability changes (enable/disable input, update status message) +- Show notification when Ollama becomes available: "Ollama connected - AI features enabled" + ### Future Enhancements - Command history with up/down arrow navigation diff --git a/.kiro/specs/ai-sidebar-enhancements/requirements.md b/.kiro/specs/ai-sidebar-enhancements/requirements.md index 1137d95..c805adb 100644 --- a/.kiro/specs/ai-sidebar-enhancements/requirements.md +++ b/.kiro/specs/ai-sidebar-enhancements/requirements.md @@ -71,3 +71,15 @@ This document outlines the requirements for enhancing the AI sidebar module for 3. WHEN reasoning mode is disabled, THE AI Sidebar SHALL request and display only the final answer without intermediate reasoning 4. THE AI Sidebar SHALL persist the reasoning mode preference across conversation sessions 5. THE AI Sidebar SHALL visually distinguish reasoning content from final answer content when reasoning mode is enabled + +### Requirement 6: Graceful Ollama Unavailability Handling + +**User Story:** As a user, I want the AI Sidebar to start and function even when Ollama is not running, so that Ignis can launch successfully and I can start Ollama when I'm ready to use the AI features. + +#### Acceptance Criteria + +1. WHEN Ollama is not running at startup, THE AI Sidebar SHALL initialize successfully without blocking Ignis startup +2. WHEN Ollama is unavailable, THE AI Sidebar SHALL display a clear message instructing the user to start Ollama +3. WHEN the user attempts to send a message while Ollama is unavailable, THE AI Sidebar SHALL display a helpful error message instead of crashing +4. WHEN Ollama becomes available after startup, THE AI Sidebar SHALL detect the availability and enable chat functionality without requiring a restart +5. THE AI Sidebar SHALL handle Ollama connection failures gracefully during model listing, switching, and chat operations diff --git a/.kiro/specs/ai-sidebar-enhancements/tasks.md b/.kiro/specs/ai-sidebar-enhancements/tasks.md index 7e9f9de..3e959ce 100644 --- a/.kiro/specs/ai-sidebar-enhancements/tasks.md +++ b/.kiro/specs/ai-sidebar-enhancements/tasks.md @@ -97,7 +97,37 @@ - Update message rendering to handle reasoning metadata - _Requirements: 5.5_ -- [ ] 8. Add error handling and edge cases +- [-] 8. 
Implement graceful Ollama unavailability handling +- [ ] 8.1 Update OllamaClient initialization + - Modify `__init__()` to never raise exceptions during initialization + - Add connection check that sets internal availability flag + - Update `list_models()` to return empty list instead of raising on connection failure + - Update `chat()` and `stream_chat()` to return error messages instead of raising + - _Requirements: 6.1, 6.3, 6.5_ + +- [ ] 8.2 Create OllamaAvailabilityMonitor + - Create `ollama_monitor.py` with OllamaAvailabilityMonitor class + - Implement periodic availability checking using GLib.timeout_add (30s interval) + - Add callback mechanism to notify UI of state changes + - Ensure checks are non-blocking and don't impact UI responsiveness + - _Requirements: 6.4_ + +- [ ] 8.3 Update SidebarWindow for Ollama unavailability + - Initialize OllamaAvailabilityMonitor in SidebarWindow + - Display "Ollama not running" status message when unavailable at startup + - Update model label to show connection status + - Disable input field when Ollama unavailable, show helpful message + - Add callback to re-enable features when Ollama becomes available + - _Requirements: 6.1, 6.2, 6.4_ + +- [ ] 8.4 Add user-friendly error messages + - Display clear instructions when user tries to chat without Ollama + - Show notification when Ollama connection is restored + - Update all command handlers to check Ollama availability + - Provide actionable error messages (e.g., "Start Ollama with: ollama serve") + - _Requirements: 6.2, 6.3_ + +- [ ] 9. Add error handling and edge cases - Implement stream timeout handling (60s limit) with cancellation - Add connection error recovery for streaming failures - Handle command execution during active streaming @@ -105,10 +135,11 @@ - Implement graceful degradation for missing preferences file - _Requirements: 1.4, 3.5, 4.3, 4.4_ -- [ ] 9. Polish and integration +- [ ] 10. Polish and integration - Add CSS styling for system messages, reasoning content, and streaming indicator - Implement `/help` command to display available commands - Add visual feedback for command execution (loading states) - Ensure all UI updates maintain smooth scrolling behavior - Test keyboard focus management across all new widgets - - _Requirements: 1.3, 2.3, 3.5, 5.5_ + - Add status indicator in UI showing Ollama connection state + - _Requirements: 1.3, 2.3, 3.5, 5.5, 6.2_ diff --git a/aisidebar/README.md b/aisidebar/README.md new file mode 100644 index 0000000..609457a --- /dev/null +++ b/aisidebar/README.md @@ -0,0 +1,124 @@ +## AI Sidebar for Ignis + +A sleek AI chat sidebar that integrates with your Ignis desktop, sliding in from the left side with Ollama AI integration. + +### Features + +- **Slide-in Animation**: Smoothly slides in from the left side (opposite of QuickCenter) +- **Ollama Integration**: Chat with local AI models via Ollama +- **Conversation Persistence**: Your conversations are automatically saved and restored +- **Material Design 3**: Matches your existing Ignis theme perfectly +- **Keyboard Toggle**: Bind a key to toggle the sidebar visibility + +### How to Use + +#### Open/Close the Sidebar + +You can toggle the sidebar using: +1. **Python/Script**: Call `window_manager.toggle_window("AISidebar")` +2. **Keyboard Shortcut**: Add a binding in your window manager config + +#### Setting up a Keyboard Shortcut + +For **Niri**, add this to your `~/.config/niri/config.kdl`: + +```kdl +binds { + // ... 
your other bindings
+
+    // Toggle AI Sidebar with Super+A (or any key you prefer)
+    Mod+A { spawn "ignis" "run" "ignis.window_manager.WindowManager.get_default().toggle_window('AISidebar')"; }
+}
+```
+
+For **Hyprland**, add this to your `~/.config/hypr/hyprland.conf`:
+
+```conf
+# Toggle AI Sidebar with Super+A
+bind = SUPER, A, exec, ignis run "ignis.window_manager.WindowManager.get_default().toggle_window('AISidebar')"
+```
+
+For **Sway**, add this to your `~/.config/sway/config`:
+
+```
+# Toggle AI Sidebar with Super+A
+bindsym $mod+A exec ignis run "ignis.window_manager.WindowManager.get_default().toggle_window('AISidebar')"
+```
+
+### Requirements
+
+- **Ignis** desktop environment
+- **Python 3.10+**
+- **Ollama** with at least one model installed
+
+No extra Python packages are required; the module talks to Ollama's HTTP API (`http://localhost:11434`) using only the Python standard library.
+
+### Configuration
+
+The sidebar will automatically:
+- Detect your default Ollama model
+- Store conversations in `~/.config/ignis/modules/aisidebar/data/conversations/`
+- Apply your current Ignis theme colors
+
+### Customization
+
+#### Change Width
+
+Edit the `width_request` value in `aisidebar.py`:
+```python
+self.content_box.width_request = 400  # Change to desired width
+```
+
+#### Change Animation Speed
+
+Edit the `transition_duration` value in `aisidebar.py`:
+```python
+transition_duration=300,  # Change to desired milliseconds
+```
+
+#### Custom CSS Styling
+
+Edit `~/.config/ignis/styles/aisidebar.scss` to customize:
+- Colors (uses Material Design 3 color tokens)
+- Border radius
+- Padding/margins
+- Message bubble styling
+
+### Troubleshooting
+
+**Sidebar doesn't appear:**
+- Restart Ignis: `ignis reload`
+- Check Ollama is running: `curl http://127.0.0.1:11434/api/tags`
+- Check for errors by running `ignis` from a terminal and watching its output
+
+**No AI responses:**
+- Ensure Ollama is running and reachable at `http://localhost:11434` (the default host used by the client)
+- Check that you have at least one model: `ollama list`
+
+**CSS not applying:**
+- Restart Ignis: `ignis reload`
+- Check SCSS compilation: look for errors in the Ignis console output
+
+### Architecture
+
+```
+~/.config/ignis/modules/aisidebar/
+├── __init__.py              # Module exports
+├── aisidebar.py             # Main RevealerWindow class
+├── chat_widget.py           # Chat UI widget
+├── ollama_client.py         # Ollama API wrapper
+├── conversation_manager.py  # Conversation persistence
+└── data/
+    └── conversations/       # Saved conversations (auto-created)
+```
+
+### Visual Design
+
+The AI Sidebar follows the same visual language as QuickCenter:
+- Material Design 3 color system
+- 20px border radius on container
+- Surface elevation with shadows
+- Smooth slide-in transitions
+- Translucent overlay backdrop
+
+Clicking outside the sidebar will close it (same as QuickCenter behavior).
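+
+### Loading the Module
+
+The keybindings above assume the sidebar window has already been created by your Ignis configuration. A minimal sketch (assuming the module lives under `~/.config/ignis/modules/` and that directory is importable from your config; adjust the import path to your setup):
+
+```python
+# In ~/.config/ignis/config.py (the import path below is an assumption).
+from modules.aisidebar import AISidebar
+
+# Constructing the window registers it under the "AISidebar" namespace,
+# which is the name toggle_window("AISidebar") looks up.
+AISidebar()
+```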
diff --git a/aisidebar/__init__.py b/aisidebar/__init__.py new file mode 100644 index 0000000..0f657ed --- /dev/null +++ b/aisidebar/__init__.py @@ -0,0 +1,3 @@ +from .aisidebar import AISidebar + +__all__ = ["AISidebar"] diff --git a/aisidebar/aisidebar.py b/aisidebar/aisidebar.py new file mode 100644 index 0000000..a06cfaa --- /dev/null +++ b/aisidebar/aisidebar.py @@ -0,0 +1,73 @@ +from ignis import widgets +from ignis.window_manager import WindowManager +from ignis.services.niri import NiriService +from .chat_widget import ChatWidget + +window_manager = WindowManager.get_default() + + +class AISidebar(widgets.RevealerWindow): + """AI Chat Sidebar that slides in from the left side""" + + def __init__(self): + # Create chat interface + self.chat_widget = ChatWidget() + + # Content box - 400px wide to match QuickCenter + self.content_box = widgets.Box( + vertical=True, + spacing=0, + hexpand=False, + css_classes=["ai-sidebar"], + child=[self.chat_widget], + ) + self.content_box.width_request = 400 + self.content_box.set_halign("start") # Align to left side + + # Revealer for slide animation + revealer = widgets.Revealer( + child=self.content_box, + transition_duration=300, + transition_type="slide_right", # Slide in from left + halign="start", # Align revealer to left + ) + + # Close button overlay (click outside to close) + close_button = widgets.Button( + vexpand=True, + hexpand=True, + can_focus=False, + on_click=lambda x: window_manager.close_window("AISidebar"), + ) + + main_overlay = widgets.Overlay( + css_classes=["popup-close"], + child=close_button, + overlays=[revealer], + ) + + super().__init__( + revealer=revealer, + child=main_overlay, + css_classes=["popup-close"], + hide_on_close=True, + visible=False, + namespace="AISidebar", + popup=True, + layer="overlay", # Same as QuickCenter + kb_mode="exclusive", # Same as QuickCenter + anchor=["left", "right", "top", "bottom"], # Anchor to ALL edges like QuickCenter + ) + + self.window_manager = window_manager + self.revealer = revealer + self.niri = NiriService.get_default() + + self.connect("notify::visible", self._toggle_revealer) + + def _toggle_revealer(self, *_): + """Toggle revealer when window visibility changes""" + self.revealer.reveal_child = self.visible + if self.visible: + # Focus on input when opened + self.chat_widget.focus_input() diff --git a/aisidebar/chat_widget.py b/aisidebar/chat_widget.py new file mode 100644 index 0000000..0ecd57f --- /dev/null +++ b/aisidebar/chat_widget.py @@ -0,0 +1,192 @@ +import threading +from ignis import widgets, app +from gi.repository import GLib +from .ollama_client import OllamaClient +from .conversation_manager import ConversationManager + + +class ChatWidget(widgets.Box): + """Chat interface widget with Ollama integration""" + + def __init__(self): + self._conversation_manager = ConversationManager() + self._ollama_client = OllamaClient() + self._current_model = self._ollama_client.default_model + + # Header with title and model + header_title = widgets.Label( + label="AI Sidebar", + halign="start", + css_classes=["title-2"], + ) + + model_name = self._current_model or "No local model detected" + + self._model_label = widgets.Label( + label=f"Model: {model_name}", + halign="start", + css_classes=["dim-label"], + ) + + header_box = widgets.Box( + vertical=True, + spacing=4, + child=[header_title, self._model_label], + ) + + # Message list + self._message_list = widgets.Box( + vertical=True, + spacing=8, + hexpand=True, + vexpand=True, + valign="start", + ) + + # Scrolled window 
for messages + self._scroller = widgets.Scroll( + hexpand=True, + vexpand=True, + min_content_height=300, + child=self._message_list, + ) + + # Input entry + self._entry = widgets.Entry( + hexpand=True, + placeholder_text="Ask a question…", + on_accept=lambda x: self._on_submit(), + ) + + # Send button + self._send_button = widgets.Button( + label="Send", + on_click=lambda x: self._on_submit(), + ) + + # Input box + input_box = widgets.Box( + spacing=8, + hexpand=True, + child=[self._entry, self._send_button], + ) + + # Main container + super().__init__( + vertical=True, + spacing=12, + hexpand=True, + vexpand=True, + child=[header_box, self._scroller, input_box], + css_classes=["ai-sidebar-content"], + ) + + # Set margins + self.set_margin_top(16) + self.set_margin_bottom(16) + self.set_margin_start(16) + self.set_margin_end(16) + + # Load initial messages + self._populate_initial_messages() + + def _populate_initial_messages(self): + """Load conversation history""" + for message in self._conversation_manager.messages: + self._append_message(message["role"], message["content"], persist=False) + + if not self._conversation_manager.messages: + self._append_message( + "assistant", + "Welcome! Ask a question to start a conversation.", + persist=True, + ) + + def _append_message(self, role: str, content: str, *, persist: bool = True): + """Add a message bubble to the chat""" + label_prefix = "You" if role == "user" else "Assistant" + + message_label = widgets.Label( + label=f"{label_prefix}: {content}", + halign="start", + xalign=0.0, + wrap=True, + wrap_mode="word_char", # Fixed: use underscore not hyphen + justify="left", + ) + + self._message_list.append(message_label) + self._scroll_to_bottom() + + if persist and self._conversation_manager: + self._conversation_manager.append_message(role, content) + + def _scroll_to_bottom(self): + """Scroll to the latest message""" + def _scroll(): + adjustment = self._scroller.get_vadjustment() + if adjustment: + adjustment.set_value(adjustment.get_upper() - adjustment.get_page_size()) + return False + + GLib.idle_add(_scroll) + + def _set_input_enabled(self, enabled: bool): + """Enable/disable input controls""" + self._entry.set_sensitive(enabled) + self._send_button.set_sensitive(enabled) + + def _on_submit(self): + """Handle message submission""" + text = self._entry.text.strip() + if not text: + return + + self._entry.text = "" + self._append_message("user", text, persist=True) + self._request_response() + + def _request_response(self): + """Request AI response in background thread""" + model = self._current_model or self._ollama_client.default_model + if not model: + self._append_message( + "assistant", + "No Ollama models are available. 
Install a model to continue.", + persist=True, + ) + return + + history = self._conversation_manager.chat_messages + self._set_input_enabled(False) + + def _worker(messages): + response = self._ollama_client.chat(model=model, messages=list(messages)) + GLib.idle_add(self._handle_response, response, priority=GLib.PRIORITY_DEFAULT) + + thread = threading.Thread(target=_worker, args=(history,), daemon=True) + thread.start() + + def _handle_response(self, response): + """Handle AI response""" + self._set_input_enabled(True) + + if not response: + self._append_message( + "assistant", + "The model returned an empty response.", + persist=True, + ) + return False + + role = response.get("role", "assistant") + content = response.get("content") or "" + if not content: + content = "[No content received from Ollama]" + + self._append_message(role, content, persist=True) + return False + + def focus_input(self): + """Focus the input entry""" + self._entry.grab_focus() diff --git a/aisidebar/conversation_manager.py b/aisidebar/conversation_manager.py new file mode 100644 index 0000000..7e0b365 --- /dev/null +++ b/aisidebar/conversation_manager.py @@ -0,0 +1,173 @@ +"""Conversation state management and persistence helpers.""" + +from __future__ import annotations + +import json +import os +import tempfile +from dataclasses import dataclass, field +from datetime import datetime, timezone +from pathlib import Path +from typing import ClassVar, Dict, Iterable, List, MutableMapping + +DEFAULT_CONVERSATION_ID = "default" + + +@dataclass +class ConversationState: + """In-memory representation of a conversation transcript.""" + + conversation_id: str + created_at: str + updated_at: str + messages: List[Dict[str, str]] = field(default_factory=list) + + +class ConversationManager: + """Load and persist conversation transcripts as JSON files.""" + + VALID_ROLES: ClassVar[set[str]] = {"system", "user", "assistant"} + + def __init__( + self, + storage_dir: str | Path | None = None, + conversation_id: str | None = None, + ) -> None: + module_root = Path(__file__).resolve().parent + default_storage = module_root / "data" / "conversations" + self._storage_dir = Path(storage_dir) if storage_dir else default_storage + self._storage_dir.mkdir(parents=True, exist_ok=True) + + self._conversation_id = conversation_id or DEFAULT_CONVERSATION_ID + self._path = self._storage_dir / f"{self._conversation_id}.json" + + self._state = self._load_state() + + # ------------------------------------------------------------------ properties + @property + def conversation_id(self) -> str: + return self._state.conversation_id + + @property + def messages(self) -> List[Dict[str, str]]: + return list(self._state.messages) + + @property + def chat_messages(self) -> List[Dict[str, str]]: + """Return messages formatted for the Ollama chat API.""" + return [ + {"role": msg["role"], "content": msg["content"]} + for msg in self._state.messages + ] + + # ------------------------------------------------------------------ public API + def append_message(self, role: str, content: str) -> Dict[str, str]: + """Append a new message and persist the updated transcript.""" + normalized_role = role.lower() + if normalized_role not in self.VALID_ROLES: + raise ValueError(f"Invalid role '{role}'. 
Expected one of {self.VALID_ROLES}.") + + timestamp = datetime.now(timezone.utc).isoformat() + message = { + "role": normalized_role, + "content": content, + "timestamp": timestamp, + } + + self._state.messages.append(message) + self._state.updated_at = timestamp + self._write_state() + return message + + def replace_messages(self, messages: Iterable[Dict[str, str]]) -> None: + """Replace the transcript contents. Useful for loading fixtures.""" + normalized: List[Dict[str, str]] = [] + for item in messages: + role = item.get("role", "").lower() + content = item.get("content", "") + if role not in self.VALID_ROLES: + continue + normalized.append( + { + "role": role, + "content": content, + "timestamp": item.get("timestamp") + or datetime.now(timezone.utc).isoformat(), + } + ) + + now = datetime.now(timezone.utc).isoformat() + self._state.messages = normalized + self._state.created_at = self._state.created_at or now + self._state.updated_at = now + self._write_state() + + # ------------------------------------------------------------------ persistence + def _load_state(self) -> ConversationState: + """Load the transcript from disk or create a fresh default.""" + if self._path.exists(): + try: + with self._path.open("r", encoding="utf-8") as fh: + payload = json.load(fh) + return self._state_from_payload(payload) + except (json.JSONDecodeError, OSError): + pass + + timestamp = datetime.now(timezone.utc).isoformat() + return ConversationState( + conversation_id=self._conversation_id, + created_at=timestamp, + updated_at=timestamp, + messages=[], + ) + + def _state_from_payload(self, payload: MutableMapping[str, object]) -> ConversationState: + """Normalize persisted data into ConversationState instances.""" + conversation_id = str(payload.get("id") or self._conversation_id) + created_at = str(payload.get("created_at") or datetime.now(timezone.utc).isoformat()) + updated_at = str(payload.get("updated_at") or created_at) + + messages_payload = payload.get("messages", []) + messages: List[Dict[str, str]] = [] + if isinstance(messages_payload, list): + for item in messages_payload: + if not isinstance(item, dict): + continue + role = str(item.get("role", "")).lower() + content = str(item.get("content", "")) + if role not in self.VALID_ROLES: + continue + timestamp = str( + item.get("timestamp") or datetime.now(timezone.utc).isoformat() + ) + messages.append({"role": role, "content": content, "timestamp": timestamp}) + + return ConversationState( + conversation_id=conversation_id, + created_at=created_at, + updated_at=updated_at, + messages=messages, + ) + + def _write_state(self) -> None: + """Persist the conversation state atomically.""" + payload = { + "id": self._state.conversation_id, + "created_at": self._state.created_at, + "updated_at": self._state.updated_at, + "messages": self._state.messages, + } + + with tempfile.NamedTemporaryFile( + "w", + encoding="utf-8", + dir=self._storage_dir, + delete=False, + prefix=f"{self._conversation_id}.", + suffix=".tmp", + ) as tmp_file: + json.dump(payload, tmp_file, indent=2, ensure_ascii=False) + tmp_file.flush() + os.fsync(tmp_file.fileno()) + + os.replace(tmp_file.name, self._path) diff --git a/aisidebar/data/conversations/default.json b/aisidebar/data/conversations/default.json new file mode 100644 index 0000000..5e80c71 --- /dev/null +++ b/aisidebar/data/conversations/default.json @@ -0,0 +1,52 @@ +{ + "id": "default", + "created_at": "2025-10-25T18:40:57.917675+00:00", + "updated_at": "2025-10-25T19:31:22.276254+00:00", + "messages": [ + { + 
"role": "assistant", + "content": "Welcome! Ask a question to start a conversation.", + "timestamp": "2025-10-25T18:40:57.919077+00:00" + }, + { + "role": "user", + "content": "Hello!", + "timestamp": "2025-10-25T19:21:06.686290+00:00" + }, + { + "role": "assistant", + "content": "No Ollama models are available. Install a model to continue.", + "timestamp": "2025-10-25T19:21:06.690475+00:00" + }, + { + "role": "user", + "content": "Hello", + "timestamp": "2025-10-25T19:26:08.659211+00:00" + }, + { + "role": "assistant", + "content": "\nOkay, the user said \"Hello\" again. The previous response mentioned that no Ollama models are available and suggested installing one. Maybe the user isn't familiar with Ollama or the setup process. I should explain what Ollama is and how to install a model. Let me check the steps for installing a model with Ollama. First, they need to download the model files, then use the Ollama CLI to load them. I should provide clear instructions, maybe mention popular models like Llama or Mistral. Also, offer further assistance if they need help with the installation. Keep the tone friendly and helpful.\n\n\nHello! It looks like you're trying to use Ollama, but no models are installed yet. To get started, you'll need to download and install a model using the Ollama CLI. Here's how:\n\n1. **Install Ollama**: If you haven't already, download it from [https://ollama.com](https://ollama.com) and follow the installation instructions for your OS.\n2. **Pull a Model**: Use the command `ollama pull ` (e.g., `ollama pull llama3` or `ollama pull mistral`).\n3. **Run the Model**: Once installed, use `ollama run ` to start a chat session.\n\nLet me know if you need help with installation or model selection! 😊", + "timestamp": "2025-10-25T19:26:15.674075+00:00" + }, + { + "role": "user", + "content": "What is a \"Baumstriezel\"?", + "timestamp": "2025-10-25T19:27:39.745357+00:00" + }, + { + "role": "assistant", + "content": "\nOkay, the user asked, \"What is a 'Baumstriezel'?\" I need to figure out what that is. First, I'll check if I know anything about it. I remember that \"Baumstriezel\" sounds German. Maybe it's a type of pastry or a holiday item? Let me think.\n\nWait, \"Baum\" means tree in German, and \"Striezel\" could be related to something like a roll or a cake. Maybe it's a tree-shaped pastry? I think there's a German dessert called \"Stollen,\" which is a fruit cake, but maybe \"Baumstriezel\" is a variation. Alternatively, could it be a Christmas decoration? Like a tree-shaped ornament?\n\nI should verify. Let me recall. In some German-speaking regions, there are traditional sweets. \"Striezel\" might refer to a type of roll or cake. Maybe \"Baumstriezel\" is a tree-shaped version of that. Alternatively, could it be a type of Christmas tree made from candy or something else?\n\nWait, I think there's a German confection called \"Baumstriezel\" which is a tree-shaped sweet. It's often made with marzipan and chocolate, decorated with nuts and fruits. It's a traditional Christmas treat. Let me confirm that. Yes, I believe that's right. So the answer would be that it's a tree-shaped sweet, typically made with marzipan and chocolate, popular during Christmas. It's similar to a Stollen but in the shape of a tree. 
I should explain that and maybe mention it's a festive item, often found in Germany and other German-speaking countries during the holiday season.\n\n\nA **Baumstriezel** (sometimes spelled **Baumstriezel** or **Baumstriezel**) is a traditional German Christmas sweet shaped like a tree. It is typically made with **marzipan** and **chocolate**, and is decorated with **nuts, fruits, and candied peel** to resemble a tree. The name translates to \"tree roll\" or \"tree cake\" in English. \n\nThis festive treat is often enjoyed during the **Christmas season** in Germany and other German-speaking regions. It’s a sweet, chewy dessert that symbolizes the holiday spirit, often displayed as a decorative item or eaten as a treat. \n\nIf you’re curious about its taste or how it’s made, I’d be happy to share more details! 🎄✨", + "timestamp": "2025-10-25T19:27:49.659293+00:00" + }, + { + "role": "user", + "content": "What can I do do improve my programming skills?", + "timestamp": "2025-10-25T19:30:51.726827+00:00" + }, + { + "role": "assistant", + "content": "\nOkay, the user asked, \"What can I do to improve my programming skills?\" Let me think about how to approach this. First, I need to provide a comprehensive answer that covers various aspects of skill development. The user might be a beginner or intermediate programmer looking for ways to grow.\n\nI should start by breaking down the answer into clear, actionable steps. Maybe categorize them into learning fundamentals, practice, projects, etc. It's important to mention both theoretical and practical aspects. Also, the user might be interested in resources, so including some recommendations would be helpful.\n\nWait, the user might not have specified their current level. Should I address different levels? Maybe mention that the advice applies to all levels but emphasize the importance of fundamentals for beginners. Also, include tips on problem-solving, understanding algorithms, and staying updated with technologies.\n\nOh, and collaboration is key. Suggesting contributing to open source or working on team projects could be beneficial. Maybe mention debugging skills and code reviews as well. Oh, and the importance of documentation and writing clean code. \n\nI should also think about common pitfalls. For example, learning without practicing, or not understanding the basics. Emphasize the need for consistent practice. Maybe include some resources like books, online courses, or communities. \n\nWait, the user might be looking for a structured approach. So, listing steps in order of importance could help. Start with fundamentals, then practice, then projects, then advanced topics. Also, mention the importance of patience and persistence. \n\nI should make sure the answer is encouraging and not overwhelming. Break it into sections with headings for clarity. Use examples like coding challenges, personal projects, etc. Maybe include tips on using version control like Git. \n\nAlso, the user might be interested in how to stay motivated. Suggesting setting goals, tracking progress, and celebrating small wins could be useful. Oh, and the importance of understanding the problem before jumping into coding. \n\nI need to avoid being too technical or too vague. Balance between practical advice and theoretical knowledge. Maybe mention specific tools or platforms like LeetCode, HackerRank, or GitHub. \n\nWait, the user might not know where to start. Including resources like free courses or communities like Stack Overflow or Reddit could be helpful. 
Also, mentioning the importance of reading others' code and contributing to open source. \n\nI should also touch on soft skills like communication and teamwork, especially if they're aiming for a career in programming. Maybe mention the importance of understanding the bigger picture, like system design or architecture. \n\nLet me structure the answer with clear sections: Learn Fundamentals, Practice Regularly, Work on Projects, Learn Advanced Topics, Collaborate, Stay Updated, and More. Each section with a few bullet points. That way, it's easy to follow and covers all bases. \n\nI need to make sure the language is friendly and encouraging. Avoid jargon where possible, but still provide enough depth. Also, remind them that improvement takes time and consistent effort. \n\nOkay, that should cover the main points. Let me put it all together in a clear, organized manner.\n\n\nImproving your programming skills is a continuous journey, but with the right strategies, you can grow significantly. Here’s a structured approach to help you progress:\n\n---\n\n### **1. Master the Fundamentals**\n- **Learn the basics**: Ensure you understand core concepts like data types, control structures (loops, conditionals), functions, and data structures (arrays, lists, dictionaries).\n- **Study algorithms and problem-solving**: Practice solving problems using logic and algorithms (e.g., sorting, searching, recursion). Platforms like **LeetCode**, **HackerRank**, or **Codewars** are great for this.\n- **Understand the language’s syntax and idioms**: For your chosen language (e.g., Python, JavaScript, Java), learn its best practices and common patterns.\n\n---\n\n### **2. Practice Regularly**\n- **Write code daily**: Even 15–30 minutes a day can make a difference. Use coding challenges or build small projects.\n- **Debug and refactor**: Learn to identify and fix bugs. Refactor code to improve readability and efficiency.\n- **Use coding platforms**: Websites like **Exercism**, **CodinGame**, or **FreeCodeCamp** offer interactive exercises and projects.\n\n---\n\n### **3. Build Projects**\n- **Start with small projects**: Create tools, games, or utilities (e.g., a to-do list app, calculator, or weather app).\n- **Work on real-world problems**: Solve issues you encounter in daily life (e.g., automate file sorting, create a budget tracker).\n- **Contribute to open source**: Find beginner-friendly projects on **GitHub** and contribute fixes or features. This helps you learn collaboration and real-world coding practices.\n\n---\n\n### **4. Learn Advanced Topics**\n- **Explore frameworks and libraries**: Learn tools relevant to your goals (e.g., React for front-end, Django for back-end, or TensorFlow for AI).\n- **Study software design patterns**: Understand patterns like MVC, singleton, or observer to write scalable code.\n- **Learn about system architecture**: For larger projects, study how to design systems, databases, and APIs.\n\n---\n\n### **5. Collaborate and Learn from Others**\n- **Join coding communities**: Participate in forums like **Stack Overflow**, **Reddit (r/learnprogramming)**, or Discord groups.\n- **Code reviews**: Ask others to review your code (or review theirs) to learn best practices and spot improvements.\n- **Pair programming**: Work with others to solve problems collaboratively, which can expose you to new ideas and techniques.\n\n---\n\n### **6. 
Stay Updated and Curious**\n- **Follow industry trends**: Read blogs, newsletters, or podcasts like **The Changelog**, **Software Engineering Daily**, or **Dev.to**.\n- **Experiment with new technologies**: Try tools, languages, or frameworks that interest you (e.g., AI, blockchain, or web3).\n- **Attend workshops or courses**: Platforms like **Coursera**, **Udemy**, or **Pluralsight** offer structured learning paths.\n\n---\n\n### **7. Document and Reflect**\n- **Write explanations**: Teach others by summarizing what you’ve learned (e.g., write blog posts or explain concepts to a friend).\n- **Track progress**: Use tools like **GitHub**, **Notion**, or a journal to document your projects and growth.\n- **Review mistakes**: Analyze errors to avoid repeating them and improve your understanding.\n\n---\n\n### **8. Focus on Soft Skills**\n- **Communication**: Practice explaining your code and ideas clearly, especially when working in teams.\n- **Time management**: Set goals (e.g., \"Learn Python in 3 months\") and break tasks into manageable steps.\n- **Resilience**: Embrace failure as part of the learning process. Debugging and fixing bugs are essential skills.\n\n---\n\n### **Resources to Explore**\n- **Books**: *Clean Code* by Robert C. Martin, *The Pragmatic Programmer*, *Grokking Algorithms*.\n- **Courses**: FreeCodeCamp, Coursera, edX, or YouTube tutorials (e.g., Traversy Media, The Net Ninja).\n- **Tools**: Git for version control, VS Code or PyCharm for IDEs, and Docker for containerization.\n\n---\n\n### **Final Tips**\n- **Be patient**: Mastery takes time. Celebrate small wins (e.g., fixing a bug or completing a project).\n- **Stay consistent**: Even 10–15 minutes of daily practice can lead to significant growth over time.\n- **Ask questions**: Don’t hesitate to seek help when stuck—others are often happy to share knowledge.\n\nBy combining theory, practice, and curiosity, you’ll steadily improve your skills and become a more confident, versatile programmer. 
🚀", + "timestamp": "2025-10-25T19:31:22.276254+00:00" + } + ] +} \ No newline at end of file diff --git a/aisidebar/ollama_client.py b/aisidebar/ollama_client.py new file mode 100644 index 0000000..6615977 --- /dev/null +++ b/aisidebar/ollama_client.py @@ -0,0 +1,130 @@ +"""Client utilities for interacting with the Ollama API via direct HTTP calls.""" + +from __future__ import annotations + +import json +from typing import Any, Dict, Iterable, Iterator +from urllib.request import Request, urlopen +from urllib.error import URLError, HTTPError + + +class OllamaClientError(RuntimeError): + """Base exception raised when Ollama operations fail.""" + + +class OllamaUnavailableError(OllamaClientError): + """Raised when the Ollama server is not available.""" + + +class OllamaClient: + """HTTP client for interacting with Ollama's REST API.""" + + def __init__(self, host: str | None = None) -> None: + self._host = host or "http://localhost:11434" + self._cached_models: list[str] | None = None + + # ------------------------------------------------------------------ helpers + @property + def is_available(self) -> bool: + """Check if Ollama server is reachable.""" + try: + req = Request(f"{self._host}/api/tags", method="GET") + with urlopen(req, timeout=2) as response: + return response.status == 200 + except (URLError, HTTPError, TimeoutError): + return False + + @property + def default_model(self) -> str | None: + """Get the first available model.""" + models = self.list_models() + return models[0] if models else None + + def list_models(self, force_refresh: bool = False) -> list[str]: + """Return the available model names, caching the result for quick reuse.""" + if self._cached_models is not None and not force_refresh: + return list(self._cached_models) + + try: + req = Request(f"{self._host}/api/tags", method="GET") + with urlopen(req, timeout=5) as response: + data = json.loads(response.read().decode()) + except (URLError, HTTPError, TimeoutError) as exc: + raise OllamaClientError(f"Failed to list models: {exc}") from exc + + models: list[str] = [] + for item in data.get("models", []): + name = item.get("name") or item.get("model") + if name: + models.append(name) + + self._cached_models = models + return list(models) + + # ------------------------------------------------------------------ chat APIs + def chat( + self, + *, + model: str, + messages: Iterable[Dict[str, str]], + ) -> dict[str, str] | None: + """Execute a blocking chat call against Ollama.""" + payload = { + "model": model, + "messages": list(messages), + "stream": False, + } + + try: + req = Request( + f"{self._host}/api/chat", + data=json.dumps(payload).encode("utf-8"), + headers={"Content-Type": "application/json"}, + method="POST", + ) + with urlopen(req, timeout=120) as response: + result = json.loads(response.read().decode()) + except (URLError, HTTPError, TimeoutError) as exc: + return { + "role": "assistant", + "content": f"Unable to reach Ollama: {exc}", + } + + # Parse the response + message = result.get("message") + if not message: + return {"role": "assistant", "content": ""} + + role = message.get("role", "assistant") + content = message.get("content", "") + + return {"role": role, "content": content} + + def stream_chat( + self, *, model: str, messages: Iterable[Dict[str, str]] + ) -> Iterator[dict[str, Any]]: + """Placeholder for streaming API - not yet implemented.""" + raise NotImplementedError("Streaming chat is not yet implemented") + + # ------------------------------------------------------------------ internals 
+    def _make_request(
+        self, endpoint: str, method: str = "GET", data: dict | None = None
+    ) -> dict:
+        """Make an HTTP request to the Ollama API."""
+        url = f"{self._host}{endpoint}"
+
+        if data:
+            req = Request(
+                url,
+                data=json.dumps(data).encode("utf-8"),
+                headers={"Content-Type": "application/json"},
+                method=method,
+            )
+        else:
+            req = Request(url, method=method)
+
+        try:
+            with urlopen(req, timeout=30) as response:
+                return json.loads(response.read().decode())
+        except (URLError, HTTPError, TimeoutError) as exc:
+            raise OllamaClientError(f"Request failed: {exc}") from exc
diff --git a/ollama_client.py b/ollama_client.py
index 5bd2274..e057418 100644
--- a/ollama_client.py
+++ b/ollama_client.py
@@ -25,17 +25,39 @@ class OllamaClient:
         self._host = host
         self._client = None
         self._cached_models: list[str] | None = None
+        self._is_available = False
 
         if ollama is None:
             return
 
-        if host and hasattr(ollama, "Client"):
-            self._client = ollama.Client(host=host)  # type: ignore[call-arg]
+        # Try to initialize client and check connection
+        try:
+            if host and hasattr(ollama, "Client"):
+                self._client = ollama.Client(host=host)  # type: ignore[call-arg]
+
+            # Test connection by attempting to list models
+            self._check_connection()
+        except Exception:
+            # Silently fail - availability flag remains False
+            pass
 
     # ------------------------------------------------------------------ helpers
+    def _check_connection(self) -> None:
+        """Check if Ollama is available and update internal flag."""
+        if ollama is None:
+            self._is_available = False
+            return
+
+        try:
+            # Attempt a simple list call to verify connection
+            self._call_sdk("list")  # type: ignore[arg-type]
+            self._is_available = True
+        except Exception:
+            self._is_available = False
+
     @property
     def is_available(self) -> bool:
-        return ollama is not None
+        return self._is_available
 
     @property
     def default_model(self) -> str | None:
@@ -52,7 +74,13 @@
         try:
             response = self._call_sdk("list")  # type: ignore[arg-type]
+            # Update availability flag on successful call
+            self._is_available = True
         except OllamaClientError:
+            self._is_available = False
+            return []
+        except Exception:
+            self._is_available = False
             return []
 
         models: list[str] = []
@@ -84,10 +112,16 @@
     ) -> dict[str, str] | None:
         """Execute a blocking chat call against Ollama."""
         if not self.is_available:
-            return {
-                "role": "assistant",
-                "content": "Ollama SDK is not installed; install `ollama` to enable responses.",
-            }
+            if ollama is None:
+                return {
+                    "role": "assistant",
+                    "content": "Ollama SDK is not installed; install `ollama` to enable responses.",
+                }
+            else:
+                return {
+                    "role": "assistant",
+                    "content": "Ollama is not running. Start Ollama with: ollama serve",
+                }
 
         try:
             result = self._call_sdk(
@@ -96,10 +130,19 @@
                 messages=list(messages),
                 stream=False,
             )
+            # Update availability flag on successful call
+            self._is_available = True
         except OllamaClientError as exc:
+            self._is_available = False
             return {
                 "role": "assistant",
-                "content": f"Unable to reach Ollama: {exc}",
+                "content": f"Unable to reach Ollama: {exc}\n\nStart Ollama with: ollama serve",
+            }
+        except Exception as exc:
+            self._is_available = False
+            return {
+                "role": "assistant",
+                "content": f"Unable to reach Ollama: {exc}\n\nStart Ollama with: ollama serve",
             }
 
         # Handle both dict responses (old SDK) and Pydantic objects (new SDK)