From 239242e2fc6048feb5c57e1336c06ae71aeb2b46 Mon Sep 17 00:00:00 2001
From: Melvin Ragusa
Date: Sun, 26 Oct 2025 09:10:31 +0100
Subject: [PATCH] refactor(aisidebar): restructure project and implement
 reasoning mode toggle

- Reorganize project structure and file locations
- Add ReasoningController to manage model selection and reasoning mode
- Update design and requirements for reasoning mode toggle
- Implement model switching between Qwen3-4B-Instruct and Qwen3-4B-Thinking models
- Remove deprecated files and consolidate project layout
- Add new steering and specification documentation
- Clean up and remove unnecessary files and directories
- Prepare for enhanced AI sidebar functionality with more flexible model handling
---
 .gitignore                                    |    8 -
 .kiro/specs/ai-sidebar-enhancements/design.md |   37 +-
 .../ai-sidebar-enhancements/requirements.md   |   10 +-
 .kiro/specs/ai-sidebar-enhancements/tasks.md  |   46 +-
 .kiro/steering/product.md                     |   23 +
 .kiro/steering/structure.md                   |   70 ++
 .kiro/steering/tech.md                        |   63 +
 AGENTS.md                                     |   38 -
 FIXES.md                                      |  212 ----
 aisidebar/README.md => README.md              |   12 +-
 aisidebar/__init__.py => __init__.py          |    0
 __pycache__/__init__.cpython-313.pyc          |  Bin 0 -> 224 bytes
 __pycache__/aisidebar.cpython-313.pyc         |  Bin 0 -> 2975 bytes
 __pycache__/chat_widget.cpython-313.pyc       |  Bin 0 -> 39423 bytes
 __pycache__/command_processor.cpython-313.pyc |  Bin 0 -> 3556 bytes
 .../conversation_archive.cpython-313.pyc      |  Bin 0 -> 7879 bytes
 .../conversation_manager.cpython-313.pyc      |  Bin 0 -> 11023 bytes
 __pycache__/ollama_client.cpython-313.pyc     |  Bin 0 -> 9161 bytes
 __pycache__/ollama_monitor.cpython-313.pyc    |  Bin 0 -> 4769 bytes
 .../reasoning_controller.cpython-313.pyc      |  Bin 0 -> 5779 bytes
 __pycache__/streaming_handler.cpython-313.pyc |  Bin 0 -> 9532 bytes
 aisidebar/aisidebar.py => aisidebar.py        |    0
 aisidebar/chat_widget.py                      |  192 ---
 aisidebar/conversation_manager.py             |  173 ---
 aisidebar/ollama_client.py                    |  130 --
 chat_widget.py                                | 1072 +++++++++++++++++
 command_processor.py                          |   87 ++
 config.py                                     |    8 -
 conversation_archive.py                       |  180 +++
 conversation_manager.py                       |   33 +
 data/conversations/.gitkeep                   |    0
 .../archive_20251026_063735_0790d478.json     |   39 +-
 .../archive_20251026_064314_9ec25159.json     |   93 ++
 .../archive_20251026_064414_f8639758.json     |   23 +
 .../archive_20251026_064712_da9eaa9a.json     |   23 +
 .../archive_20251026_064923_2820888b.json     |   23 +
 .../archive_20251026_065520_f3077a13.json     |   23 +
 .../archive_20251026_070313_12321467.json     |   93 ++
 .../archive_20251026_070439_8a492c00.json     |   43 +
 .../archive_20251026_070825_37517bf3.json     |   53 +
 .../archive_20251026_071216_a2983057.json     |   33 +
 .../archive_20251026_071416_41dd825d.json     |   33 +
 .../archive_20251026_073127_5e8a9546.json     |   23 +
 .../archive_20251026_073238_0462b871.json     |   33 +
 .../archive_20251026_073517_b1adec2f.json     |   53 +
 .../archive_20251026_074254_7e273f2b.json     |   83 ++
 .../archive_20251026_074828_f11452f2.json     |   23 +
 .../archive_20251026_075259_179ebacd.json     |   43 +
 .../archive_20251026_075756_53b92057.json     |   43 +
 .../archive_20251026_080206_48becd70.json     |   23 +
 .../archive_20251026_080452_8beb9b71.json     |   23 +
 data/conversations/default.json               |   22 +
 inspiration/quickcenter/__init__.py           |    5 -
 inspiration/quickcenter/quickcenter.py        |  150 ---
 inspiration/quickcenter/widgets/__init__.py   |    7 -
 .../quickcenter/widgets/notificationcenter.py |   98 --
 inspiration/quickcenter/widgets/sliders.py    |   94 --
 main.py                                       |   78 --
 message_widget.py                             |    8 -
 ollama_client.py                              |  267 ++--
 ollama_monitor.py                             |  103 ++
 pyproject.toml                                |   16 -
 reasoning_controller.py                       |  127 ++
 requirements.txt                              |    5 -
 run.sh                                        |   19 -
 sidebar-plan.md                               |  640 ----------
 sidebar_window.py                             |  223 ----
 streaming_handler.py                          |  222 ++++
 style.css                                     |   74 ++
 styles.css                                    |    5 -
 tests/__init__.py                             |    0
 tests/test_conversation_manager.py            |   34 -
 tests/test_smoke.py                           |   25 -
 73 files changed, 3094 insertions(+), 2348 deletions(-)
 delete mode 100644 .gitignore
 create mode 100644 .kiro/steering/product.md
 create mode 100644 .kiro/steering/structure.md
 create mode 100644 .kiro/steering/tech.md
 delete mode 100644 AGENTS.md
 delete mode 100644 FIXES.md
 rename aisidebar/README.md => README.md (91%)
 rename aisidebar/__init__.py => __init__.py (100%)
 create mode 100644 __pycache__/__init__.cpython-313.pyc
 create mode 100644 __pycache__/aisidebar.cpython-313.pyc
 create mode 100644 __pycache__/chat_widget.cpython-313.pyc
 create mode 100644 __pycache__/command_processor.cpython-313.pyc
 create mode 100644 __pycache__/conversation_archive.cpython-313.pyc
 create mode 100644 __pycache__/conversation_manager.cpython-313.pyc
 create mode 100644 __pycache__/ollama_client.cpython-313.pyc
 create mode 100644 __pycache__/ollama_monitor.cpython-313.pyc
 create mode 100644 __pycache__/reasoning_controller.cpython-313.pyc
 create mode 100644 __pycache__/streaming_handler.cpython-313.pyc
 rename aisidebar/aisidebar.py => aisidebar.py (100%)
 delete mode 100644 aisidebar/chat_widget.py
 delete mode 100644 aisidebar/conversation_manager.py
 delete mode 100644 aisidebar/ollama_client.py
 create mode 100644 chat_widget.py
 create mode 100644 command_processor.py
 delete mode 100644 config.py
 create mode 100644 conversation_archive.py
 delete mode 100644 data/conversations/.gitkeep
 rename aisidebar/data/conversations/default.json => data/conversations/archive_20251026_063735_0790d478.json (64%)
 create mode 100644 data/conversations/archive_20251026_064314_9ec25159.json
 create mode 100644 data/conversations/archive_20251026_064414_f8639758.json
 create mode 100644 data/conversations/archive_20251026_064712_da9eaa9a.json
 create mode 100644 data/conversations/archive_20251026_064923_2820888b.json
 create mode 100644 data/conversations/archive_20251026_065520_f3077a13.json
 create mode 100644 data/conversations/archive_20251026_070313_12321467.json
 create mode 100644 data/conversations/archive_20251026_070439_8a492c00.json
 create mode 100644 data/conversations/archive_20251026_070825_37517bf3.json
 create mode 100644 data/conversations/archive_20251026_071216_a2983057.json
 create mode 100644 data/conversations/archive_20251026_071416_41dd825d.json
 create mode 100644 data/conversations/archive_20251026_073127_5e8a9546.json
 create mode 100644 data/conversations/archive_20251026_073238_0462b871.json
 create mode 100644 data/conversations/archive_20251026_073517_b1adec2f.json
 create mode 100644 data/conversations/archive_20251026_074254_7e273f2b.json
 create mode 100644 data/conversations/archive_20251026_074828_f11452f2.json
 create mode 100644 data/conversations/archive_20251026_075259_179ebacd.json
 create mode 100644 data/conversations/archive_20251026_075756_53b92057.json
 create mode 100644 data/conversations/archive_20251026_080206_48becd70.json
 create mode 100644 data/conversations/archive_20251026_080452_8beb9b71.json
 create mode 100644 data/conversations/default.json
 delete mode 100644 inspiration/quickcenter/__init__.py
 delete mode 100644 inspiration/quickcenter/quickcenter.py
 delete mode 100644 inspiration/quickcenter/widgets/__init__.py
 delete mode 100644 inspiration/quickcenter/widgets/notificationcenter.py
 delete mode 100644 inspiration/quickcenter/widgets/sliders.py
 delete mode 100644 main.py
 delete mode 100644 message_widget.py
 create mode 100644 ollama_monitor.py
 delete mode 100644 pyproject.toml
 create mode 100644 reasoning_controller.py
 delete mode 100644 requirements.txt
 delete mode 100755 run.sh
 delete mode 100644 sidebar-plan.md
 delete mode 100644 sidebar_window.py
 create mode 100644 streaming_handler.py
 create mode 100644 style.css
 delete mode 100644 styles.css
 delete mode 100644 tests/__init__.py
 delete mode 100644 tests/test_conversation_manager.py
 delete mode 100644 tests/test_smoke.py
diff --git a/.gitignore b/.gitignore
deleted file mode 100644
index 8f3a7e4..0000000
--- a/.gitignore
+++ /dev/null
@@ -1,8 +0,0 @@
-.venv/
-__pycache__/
-*.pyc
-.pytest_cache/
-.ruff_cache/
-data/conversations/*.json
-data/conversations/index.json
-data/conversations/*.tmp
diff --git a/.kiro/specs/ai-sidebar-enhancements/design.md b/.kiro/specs/ai-sidebar-enhancements/design.md
index 79202b9..b334978 100644
--- a/.kiro/specs/ai-sidebar-enhancements/design.md
+++ b/.kiro/specs/ai-sidebar-enhancements/design.md
@@ -233,7 +233,11 @@ class ConversationArchive:
 
 ```python
 class ReasoningController:
-    """Manages reasoning mode state and API parameters."""
+    """Manages reasoning mode state and model selection."""
+
+    # Model names for reasoning toggle
+    INSTRUCT_MODEL = "hf.co/unsloth/Qwen3-4B-Instruct-2507-GGUF:Q8_K_XL"
+    THINKING_MODEL = "hf.co/unsloth/Qwen3-4B-Thinking-2507-GGUF:Q8_K_XL"
 
     def __init__(self):
         self._enabled = False
@@ -245,8 +249,9 @@ class ReasoningController:
     def toggle(self) -> bool:
         """Toggle reasoning mode and persist preference."""
 
-    def get_chat_options(self) -> dict:
-        """Return Ollama API options for reasoning mode."""
+    def get_model_name(self) -> str:
+        """Return the appropriate model name based on reasoning mode."""
+        return self.THINKING_MODEL if self._enabled else self.INSTRUCT_MODEL
 ```
 
 #### UI Components
@@ -254,41 +259,35 @@ class ReasoningController:
 
 Add toggle button to header area:
 
 ```python
-self._reasoning_toggle = Gtk.ToggleButton(label="🧠 Reasoning")
-self._reasoning_toggle.connect("toggled", self._on_reasoning_toggled)
+self._reasoning_toggle = widgets.Button(label="🧠 Reasoning: OFF")
+self._reasoning_toggle.connect("clicked", self._on_reasoning_toggled)
 ```
 
 #### Ollama Integration
 
-When reasoning mode is enabled, pass additional options to Ollama:
+When reasoning mode is toggled, switch between models:
 
 ```python
-# Standard mode
-ollama.chat(model=model, messages=messages)
+# Get model based on reasoning mode
+model = self._reasoning_controller.get_model_name()
 
-# Reasoning mode (model-dependent)
-ollama.chat(
-    model=model,
-    messages=messages,
-    options={
-        "temperature": 0.7,
-        # Model-specific reasoning parameters
-    }
-)
+# Use the selected model for chat
+ollama.chat(model=model, messages=messages)
 ```
 
 #### Message Formatting
 
-When reasoning is enabled and model supports it:
+When using the thinking model:
 - Display thinking process in distinct style (italic, gray text)
 - Separate reasoning from final answer with visual divider
-- Use expandable/collapsible section for reasoning (optional)
+- Parse `<think>` tags from model output to extract reasoning content
 
 #### Persistence
 
 - Save reasoning preference to `~/.config/aisidebar/preferences.json`
 - Load preference on startup
 - Apply to all new conversations
+- Automatically switch models when preference changes
 
 ## Data Models
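The `<think>`-tag parsing the design calls for might look like the sketch below. The tag name follows the Qwen3 thinking-model convention referenced above; the regex and function name are illustrative, not part of the patch:

```python
import re

# The thinking model wraps its reasoning in <think>...</think> before the
# final answer; split the two so the UI can style them differently.
THINK_RE = re.compile(r"<think>(.*?)</think>", re.DOTALL)

def split_reasoning(raw: str) -> tuple[str, str]:
    """Return (reasoning, answer) extracted from raw model output."""
    match = THINK_RE.search(raw)
    if not match:
        return "", raw.strip()  # instruct model: no reasoning block
    reasoning = match.group(1).strip()
    answer = THINK_RE.sub("", raw, count=1).strip()
    return reasoning, answer
```

For example, `split_reasoning("<think>check the units</think>42 km")` yields `("check the units", "42 km")`.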
diff --git a/.kiro/specs/ai-sidebar-enhancements/requirements.md b/.kiro/specs/ai-sidebar-enhancements/requirements.md
index c805adb..5e7b7d4 100644
--- a/.kiro/specs/ai-sidebar-enhancements/requirements.md
+++ b/.kiro/specs/ai-sidebar-enhancements/requirements.md
@@ -62,15 +62,15 @@ This document outlines the requirements for enhancing the AI sidebar module for
 
 ### Requirement 5: Reasoning Mode Toggle
 
-**User Story:** As a user, I want to enable or disable the model's reasoning output, so that I can choose whether to see the thinking process or just the final answer based on my needs.
+**User Story:** As a user, I want to toggle between a reasoning model and an instruct model, so that I can choose whether to use a model that shows its thinking process or one that provides direct answers.
 
 #### Acceptance Criteria
 
-1. THE AI Sidebar SHALL provide a toggle button or control to enable reasoning mode
-2. WHEN reasoning mode is enabled, THE AI Sidebar SHALL request and display the model's thinking process before the final answer
-3. WHEN reasoning mode is disabled, THE AI Sidebar SHALL request and display only the final answer without intermediate reasoning
+1. THE AI Sidebar SHALL provide a toggle button or control to switch between reasoning and instruct models
+2. WHEN reasoning mode is enabled, THE AI Sidebar SHALL switch to the Qwen3-4B-Thinking model and display the model's thinking process
+3. WHEN reasoning mode is disabled, THE AI Sidebar SHALL switch to the Qwen3-4B-Instruct model for direct answers
 4. THE AI Sidebar SHALL persist the reasoning mode preference across conversation sessions
-5. THE AI Sidebar SHALL visually distinguish reasoning content from final answer content when reasoning mode is enabled
+5. THE AI Sidebar SHALL visually distinguish reasoning content from final answer content when using the thinking model
 
 ### Requirement 6: Graceful Ollama Unavailability Handling
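A minimal sketch of the controller these criteria describe, combining the model constants from the design doc with the preference persistence of criterion 4 — the JSON layout of `preferences.json` is an assumption:

```python
import json
from pathlib import Path

class ReasoningController:
    """Switch between the instruct and thinking models and persist the choice."""

    INSTRUCT_MODEL = "hf.co/unsloth/Qwen3-4B-Instruct-2507-GGUF:Q8_K_XL"
    THINKING_MODEL = "hf.co/unsloth/Qwen3-4B-Thinking-2507-GGUF:Q8_K_XL"
    PREFS_PATH = Path.home() / ".config" / "aisidebar" / "preferences.json"

    def __init__(self) -> None:
        try:
            prefs = json.loads(self.PREFS_PATH.read_text())
            self._enabled = bool(prefs.get("reasoning_enabled", False))
        except (OSError, ValueError):
            self._enabled = False  # default to the instruct model

    def is_enabled(self) -> bool:
        return self._enabled

    def toggle(self) -> bool:
        self._enabled = not self._enabled
        self.PREFS_PATH.parent.mkdir(parents=True, exist_ok=True)
        self.PREFS_PATH.write_text(json.dumps({"reasoning_enabled": self._enabled}))
        return self._enabled

    def get_model_name(self) -> str:
        return self.THINKING_MODEL if self._enabled else self.INSTRUCT_MODEL
```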
diff --git a/.kiro/specs/ai-sidebar-enhancements/tasks.md b/.kiro/specs/ai-sidebar-enhancements/tasks.md
index 3e959ce..1d5a8a8 100644
--- a/.kiro/specs/ai-sidebar-enhancements/tasks.md
+++ b/.kiro/specs/ai-sidebar-enhancements/tasks.md
@@ -1,19 +1,19 @@
 # Implementation Plan
 
-- [ ] 1. Implement streaming response infrastructure
+- [x] 1. Implement streaming response infrastructure
   - Create StreamingHandler class in new file `streaming_handler.py` with token buffering, UI update methods, and stream state management
   - Add `_handle_stream_token()` method to SidebarWindow that uses GLib.idle_add for thread-safe UI updates
   - Implement token buffering logic (accumulate 3-5 tokens before UI update) to reduce overhead
   - _Requirements: 1.1, 1.2, 1.3, 1.4_
 
-- [ ] 2. Integrate streaming into SidebarWindow
+- [x] 2. Integrate streaming into SidebarWindow
   - Modify `_request_response()` to use `ollama_client.stream_chat()` instead of blocking `chat()`
   - Update worker thread to iterate over stream and call `_handle_stream_token()` for each chunk
   - Add streaming state indicator (visual feedback during generation)
   - Handle stream errors and interruptions gracefully with try-except blocks
   - _Requirements: 1.1, 1.2, 1.3, 1.4_
 
-- [ ] 3. Replace single-line Entry with multi-line TextView
+- [x] 3. Replace single-line Entry with multi-line TextView
   - Replace `Gtk.Entry` with `Gtk.TextView` wrapped in `Gtk.ScrolledWindow` in `_build_ui()`
   - Configure text view with word wrapping, min height 40px, max height 200px
   - Implement key event controller to handle Enter (submit) vs Shift+Enter (newline)
@@ -21,37 +21,37 @@
   - Update `_on_submit()` to extract text from TextView buffer instead of Entry
   - _Requirements: 2.1, 2.2, 2.3, 2.4_
 
-- [ ] 4. Create command processing system
+- [x] 4. Create command processing system
   - Create `command_processor.py` with CommandProcessor class
   - Implement command parsing logic with `is_command()` and `execute()` methods
   - Define CommandResult dataclass for structured command responses
   - Add command registry dictionary mapping command strings to handler methods
   - _Requirements: 3.1, 3.2, 3.3, 3.4, 3.5_
 
-- [ ] 5. Implement conversation management commands
-- [ ] 5.1 Implement `/new` and `/clear` commands
+- [x] 5. Implement conversation management commands
+- [x] 5.1 Implement `/new` and `/clear` commands
   - Add `_cmd_new_conversation()` method to save current conversation and reset to fresh state
   - Clear message list UI and show confirmation message
   - _Requirements: 3.1, 3.2_
 
-- [ ] 5.2 Implement `/models` command
+- [x] 5.2 Implement `/models` command
   - Add `_cmd_list_models()` method to query and display available models
   - Format model list with current model highlighted
   - _Requirements: 3.3_
 
-- [ ] 5.3 Implement `/model` command
+- [x] 5.3 Implement `/model` command
   - Add `_cmd_switch_model()` method to validate and switch active model
   - Update model label in header UI
   - _Requirements: 3.4_
 
-- [ ] 5.4 Integrate CommandProcessor into SidebarWindow
+- [x] 5.4 Integrate CommandProcessor into SidebarWindow
   - Add CommandProcessor instance to SidebarWindow initialization
   - Modify `_on_submit()` to check for commands before processing as user message
   - Display command results as system messages with distinct styling
   - _Requirements: 3.5_
 
-- [ ] 6. Implement conversation archive system
-- [ ] 6.1 Create ConversationArchive class
+- [x] 6. Implement conversation archive system
+- [x] 6.1 Create ConversationArchive class
   - Create `conversation_archive.py` with ConversationArchive class
   - Implement `list_conversations()` to scan storage directory and return metadata
   - Implement `archive_conversation()` to save with timestamp-based ID format
@@ -59,60 +59,60 @@
   - Define ConversationMetadata dataclass
   - _Requirements: 4.1, 4.2_
 
-- [ ] 6.2 Implement conversation loading
+- [x] 6.2 Implement conversation loading
   - Add `load_conversation()` method to ConversationArchive
   - Handle JSON parsing errors and missing files gracefully
   - Return ConversationState compatible with existing ConversationManager
   - _Requirements: 4.4_
 
-- [ ] 6.3 Implement `/list` and `/resume` commands
+- [x] 6.3 Implement `/list` and `/resume` commands
   - Add `_cmd_list_conversations()` to display archived conversations with metadata
   - Add `_cmd_resume_conversation()` to load and display selected conversation
   - Update SidebarWindow to repopulate message list from loaded conversation
   - _Requirements: 4.3, 4.4, 4.5_
 
-- [ ] 7. Implement reasoning mode toggle
-- [ ] 7.1 Create ReasoningController class
+- [x] 7. Implement reasoning mode toggle
+- [x] 7.1 Create ReasoningController class
   - Create `reasoning_controller.py` with ReasoningController class
   - Implement preference persistence to `~/.config/aisidebar/preferences.json`
   - Add `toggle()`, `is_enabled()`, and `get_chat_options()` methods
   - Define PreferencesState dataclass
   - _Requirements: 5.4_
 
-- [ ] 7.2 Add reasoning toggle UI
+- [x] 7.2 Add reasoning toggle UI
   - Add ToggleButton to header area in `_build_ui()`
   - Connect toggle signal to `_on_reasoning_toggled()` callback
   - Update button state from persisted preference on startup
   - _Requirements: 5.1_
 
-- [ ] 7.3 Integrate reasoning mode with Ollama calls
+- [x] 7.3 Integrate reasoning mode with Ollama calls
   - Modify `_request_response()` to include reasoning options when enabled
   - Pass model-specific parameters via `get_chat_options()`
   - Handle both streaming and non-streaming modes with reasoning
   - _Requirements: 5.2, 5.3_
 
-- [ ] 7.4 Implement reasoning content formatting
+- [x] 7.4 Implement reasoning content formatting
   - Add visual distinction for reasoning content (italic, gray text, or expandable section)
   - Separate reasoning from final answer with visual divider
   - Update message rendering to handle reasoning metadata
   - _Requirements: 5.5_
 
-- [-] 8. Implement graceful Ollama unavailability handling
-- [ ] 8.1 Update OllamaClient initialization
+- [x] 8. Implement graceful Ollama unavailability handling
+- [x] 8.1 Update OllamaClient initialization
   - Modify `__init__()` to never raise exceptions during initialization
   - Add connection check that sets internal availability flag
   - Update `list_models()` to return empty list instead of raising on connection failure
   - Update `chat()` and `stream_chat()` to return error messages instead of raising
   - _Requirements: 6.1, 6.3, 6.5_
 
-- [ ] 8.2 Create OllamaAvailabilityMonitor
+- [x] 8.2 Create OllamaAvailabilityMonitor
   - Create `ollama_monitor.py` with OllamaAvailabilityMonitor class
   - Implement periodic availability checking using GLib.timeout_add (30s interval)
   - Add callback mechanism to notify UI of state changes
   - Ensure checks are non-blocking and don't impact UI responsiveness
   - _Requirements: 6.4_
 
-- [ ] 8.3 Update SidebarWindow for Ollama unavailability
+- [x] 8.3 Update SidebarWindow for Ollama unavailability
   - Initialize OllamaAvailabilityMonitor in SidebarWindow
   - Display "Ollama not running" status message when unavailable at startup
   - Update model label to show connection status
@@ -120,7 +120,7 @@
   - Add callback to re-enable features when Ollama becomes available
   - _Requirements: 6.1, 6.2, 6.4_
 
-- [ ] 8.4 Add user-friendly error messages
+- [x] 8.4 Add user-friendly error messages
   - Display clear instructions when user tries to chat without Ollama
   - Show notification when Ollama connection is restored
   - Update all command handlers to check Ollama availability
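For tasks 1-2, the token-buffering pattern could be sketched as follows. `GLib.idle_add` is the piece the task list actually mandates; the buffer size, method names, and UI callback are illustrative:

```python
import threading
from gi.repository import GLib  # available inside an Ignis/GTK session

class StreamingHandler:
    """Buffer streamed tokens and flush them to the UI on the GTK main thread."""

    def __init__(self, append_to_ui, flush_every: int = 4):
        self._append_to_ui = append_to_ui  # must only run on the main loop
        self._flush_every = flush_every    # accumulate 3-5 tokens per UI update
        self._buffer: list[str] = []
        self._lock = threading.Lock()

    def on_token(self, token: str) -> None:
        """Called from the worker thread for each streamed chunk."""
        with self._lock:
            self._buffer.append(token)
            if len(self._buffer) < self._flush_every:
                return
            text, self._buffer = "".join(self._buffer), []

        def apply(t=text):
            self._append_to_ui(t)
            return False  # run once, then drop out of the idle queue

        GLib.idle_add(apply)
```

A final `flush()` when the stream ends would drain whatever tokens remain in the buffer.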
diff --git a/.kiro/steering/product.md b/.kiro/steering/product.md
new file mode 100644
index 0000000..bb00026
--- /dev/null
+++ b/.kiro/steering/product.md
@@ -0,0 +1,23 @@
+---
+inclusion: always
+---
+
+# Product Overview
+
+AI Sidebar is a slide-in chat interface for the Ignis desktop environment that provides local AI assistance through Ollama integration.
+
+## Core Features
+
+- Slide-in sidebar from the left side with smooth animations
+- Local AI chat using Ollama models
+- Automatic conversation persistence across sessions
+- Material Design 3 theming that matches Ignis
+- Keyboard shortcut toggle support
+- Automatic Ollama availability monitoring with graceful degradation
+
+## User Experience
+
+- Clicking outside the sidebar closes it (same as QuickCenter)
+- Conversations are automatically saved to `~/.config/ignis/modules/aisidebar/data/conversations/`
+- The UI gracefully handles Ollama being unavailable and notifies users when connectivity is restored
+- Default width is 400px to match QuickCenter
diff --git a/.kiro/steering/structure.md b/.kiro/steering/structure.md
new file mode 100644
index 0000000..58f1d13
--- /dev/null
+++ b/.kiro/steering/structure.md
@@ -0,0 +1,70 @@
+---
+inclusion: always
+---
+
+# Project Structure
+
+## File Organization
+
+```
+aisidebar/
+├── __init__.py              # Module exports (AISidebar class)
+├── aisidebar.py             # Main RevealerWindow implementation
+├── chat_widget.py           # Chat UI widget with message handling
+├── ollama_client.py         # HTTP client for Ollama REST API
+├── ollama_monitor.py        # Availability monitoring with callbacks
+├── conversation_manager.py  # Conversation persistence layer
+└── data/
+    └── conversations/       # JSON conversation files (auto-created)
+        └── default.json     # Default conversation transcript
+```
+
+## Module Responsibilities
+
+### `aisidebar.py`
+- Main window class extending `widgets.RevealerWindow`
+- Handles slide-in animation from left side
+- Manages window visibility and keyboard focus
+- Integrates with Ignis WindowManager
+
+### `chat_widget.py`
+- Complete chat UI implementation
+- Message list rendering and scrolling
+- Input handling and submission
+- Background thread management for AI requests
+- Ollama availability monitoring integration
+
+### `ollama_client.py`
+- Low-level HTTP client for Ollama API
+- Model listing with caching
+- Blocking chat API calls
+- Connection health checking
+- Graceful error handling without exceptions
+
+### `ollama_monitor.py`
+- Periodic availability checking (30s interval)
+- Callback-based state change notifications
+- GLib timeout integration for non-blocking checks
+
+### `conversation_manager.py`
+- JSON-based conversation persistence
+- Atomic file writes for data safety
+- Message validation (system/user/assistant roles)
+- Timestamp tracking for messages
+
+## Naming Conventions
+
+- Private methods/attributes: `_method_name`, `_attribute_name`
+- Widget references: `self._widget_name` (e.g., `self._entry`, `self._message_list`)
+- CSS classes: kebab-case (e.g., `ai-sidebar`, `ai-sidebar-content`)
+- Constants: UPPER_SNAKE_CASE (e.g., `VALID_ROLES`, `DEFAULT_CONVERSATION_ID`)
+
+## Code Style
+
+- Type hints on function signatures
+- Docstrings for classes and public methods
+- Dataclasses for structured data (`ConversationState`)
+- Context managers for file operations
+- Property decorators for computed attributes
+- Threading: daemon threads for background work
+- Error messages: user-friendly with actionable instructions
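The atomic-write pattern `conversation_manager.py` is credited with above is, in essence, the following — the path handling and function name are invented for the example:

```python
import json
import os
import tempfile
from pathlib import Path

def save_conversation(path: Path, data: dict) -> None:
    """Write JSON to a temp file, then atomically swap it into place."""
    path.parent.mkdir(parents=True, exist_ok=True)
    fd, tmp_name = tempfile.mkstemp(dir=path.parent, suffix=".tmp")
    try:
        with os.fdopen(fd, "w", encoding="utf-8") as tmp:
            json.dump(data, tmp, indent=2)
        os.replace(tmp_name, path)  # atomic on POSIX: no half-written file
    except BaseException:
        os.unlink(tmp_name)  # clean up the temp file on any failure
        raise
```

Because `os.replace()` is atomic, a crash mid-write leaves either the old file or the new one, never a truncated mix — which is also why the removed `.gitignore` excluded `data/conversations/*.tmp`.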
diff --git a/.kiro/steering/tech.md b/.kiro/steering/tech.md
new file mode 100644
index 0000000..a7e45e7
--- /dev/null
+++ b/.kiro/steering/tech.md
@@ -0,0 +1,63 @@
+---
+inclusion: always
+---
+
+# Technology Stack
+
+## Framework & Environment
+
+- **Platform**: Ignis desktop environment (Python-based GTK4 framework)
+- **Python Version**: 3.10+
+- **UI Framework**: GTK4 via Ignis widgets
+- **Async/Threading**: GLib for main loop, Python threading for background tasks
+
+## Key Dependencies
+
+- `ignis` - Desktop environment framework providing widgets and window management
+- `ollama` - Python package for Ollama API integration
+- GTK4 (`gi.repository.GLib`) - UI toolkit and event loop
+
+## Architecture Patterns
+
+### Widget System
+- Uses Ignis widget abstractions (`widgets.Box`, `widgets.RevealerWindow`, etc.)
+- Material Design 3 styling via CSS classes
+- Revealer-based slide animations
+
+### API Communication
+- Direct HTTP calls to Ollama REST API (no external HTTP library)
+- Uses `urllib.request` for HTTP operations
+- Timeout handling: 2s for health checks, 5s for model lists, 120s for chat
+
+### State Management
+- Conversation persistence via JSON files
+- Atomic file writes using `tempfile` and `os.replace()`
+- In-memory caching for model lists
+
+### Threading Model
+- UI operations on GLib main thread
+- AI requests in background daemon threads
+- `GLib.idle_add()` for thread-safe UI updates
+
+### Error Handling
+- Graceful degradation when Ollama is unavailable
+- Availability monitoring with 30-second polling interval
+- User-facing error messages instead of exceptions
+
+## Common Commands
+
+Since this is an Ignis module, there are no build/test commands. The module is loaded directly by Ignis:
+
+```bash
+# Reload Ignis to apply changes
+ignis reload
+
+# Run Ignis with console output for debugging
+ignis
+
+# Check Ollama status
+curl http://127.0.0.1:11434/api/tags
+
+# List installed Ollama models
+ollama list
+```
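Putting several of these patterns together (urllib, the 2s health-check timeout, 30-second polling, daemon threads, `GLib.idle_add`), an availability monitor in the spirit of `ollama_monitor.py` might look like this — the class and callback names follow the task list, the rest is illustrative:

```python
import threading
import urllib.request
from gi.repository import GLib

class OllamaAvailabilityMonitor:
    CHECK_INTERVAL_S = 30
    HEALTH_URL = "http://127.0.0.1:11434/api/tags"

    def __init__(self, on_change) -> None:
        self._on_change = on_change   # invoked on the main thread
        self._available = None        # unknown until the first probe
        GLib.timeout_add_seconds(self.CHECK_INTERVAL_S, self._schedule_probe)

    def _schedule_probe(self) -> bool:
        # Probe in a daemon thread so a slow or absent server never blocks the UI.
        threading.Thread(target=self._probe, daemon=True).start()
        return True  # keep the periodic GLib timeout alive

    def _probe(self) -> None:
        try:
            with urllib.request.urlopen(self.HEALTH_URL, timeout=2):
                available = True
        except OSError:
            available = False
        if available != self._available:
            self._available = available
            GLib.idle_add(self._on_change, available)  # notify on the main loop
```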
diff --git a/AGENTS.md b/AGENTS.md
deleted file mode 100644
index 0d9b2e7..0000000
--- a/AGENTS.md
+++ /dev/null
@@ -1,38 +0,0 @@
-# Repository Guidelines
-
-## Project Structure & Module Organization
-- `main.py` wires the GTK `Application` and guards headless runs; UI widgets live in `sidebar_window.py` and `message_widget.py`.
-- `ollama_client.py` wraps streaming calls and threading helpers so GTK stays responsive.
-- Conversation state persists through `conversation_manager.py` and JSON files under `data/conversations/`; keep writes atomic.
-- Shared settings belong in `config.py`, styles in `styles.css`, and tooling defaults in `pyproject.toml`; prefer adding focused modules over bloating these.
-- Tests mirror the source tree under `tests/`, with fixtures in `tests/fixtures/` for reusable transcripts and metadata.
-
-## Build, Test, and Development Commands
-- `python -m venv .venv && source .venv/bin/activate` — creates and activates the project's virtual environment.
-- `pip install -r requirements.txt` — installs GTK, Ollama, and tooling dependencies.
-- `python main.py` — launches the sidebar; requires a Wayland/X11 session.
-- `AI_SIDEBAR_HEADLESS=1 python main.py` — skips GTK startup for CI smoke checks.
-- `AI_SIDEBAR_HEADLESS=1 pytest` — runs the full test suite; combine with `-k "conversation"` for targeted checks.
-
-## Coding Style & Naming Conventions
-- Use 4-space indentation and format with `black .`; avoid tab characters.
-- Lint via `ruff check .` and fix violations instead of adding ignores.
-- Files stay snake_case; classes use PascalCase; callbacks adopt verb-based snake_case (e.g., `handle_stream_chunk`).
-- Keep GTK signal handlers thin and delegate behavior to helpers or managers.
-
-## Testing Guidelines
-- Prefer `pytest` parameterized cases for conversation flows; store golden transcripts in `tests/fixtures/responses/`.
-- Name tests `test_<module>_<behavior>` (e.g., `test_conversation_manager_persists_history`).
-- Cover threading boundaries by mocking Ollama responses and asserting GTK updates via `GLib.idle_add`.
-- Use `AI_SIDEBAR_HEADLESS=1` when exercising tests or scripts in non-GUI environments.
-- Run `pytest --maxfail=1` before commits to catch regressions early.
-
-## Commit & Pull Request Guidelines
-- Follow the Conventional Commit pattern (`feat:`, `fix:`, `refactor:`) to keep the changelog clean.
-- Keep commits scoped to a single concern and include tests or fixtures when changing behavior.
-- Pull requests should summarize user-facing changes, list manual test steps, and attach screenshots or recordings for UI tweaks.
-- Reference related issues with `Closes #ID` and call out follow-up tasks to keep planning clear.
-
-## Agent Workflow Tips
-- When prototyping new UI flows, add exploratory scripts under `experiments/` and clean them up before merging.
-- Document new configuration toggles in `config.py` docstrings and echo them in the README so users can discover them easily.
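A test written to these (now removed) guidelines might have looked like the following — the `ConversationManager` constructor and method names here are guesses for illustration, not the project's actual API:

```python
import os

os.environ.setdefault("AI_SIDEBAR_HEADLESS", "1")  # skip GTK startup in CI

def test_conversation_manager_persists_history(tmp_path):
    from conversation_manager import ConversationManager  # project module

    manager = ConversationManager(storage_dir=tmp_path)   # assumed signature
    manager.append_message(role="user", content="hello")  # assumed API
    reloaded = ConversationManager(storage_dir=tmp_path)
    assert any(m["content"] == "hello" for m in reloaded.messages)
```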
diff --git a/FIXES.md b/FIXES.md
deleted file mode 100644
index cde8e42..0000000
--- a/FIXES.md
+++ /dev/null
@@ -1,212 +0,0 @@
-# GTK4 Layer Shell and Ollama Fixes
-
-## Problems Identified
-
-You were experiencing multiple issues when running the application:
-
-1. **"Failed to initialize layer surface, not on Wayland"**
-2. **Multiple "GtkWindow is not a layer surface"** warnings (9 times)
-3. **"GtkText - did not receive a focus-out event"** warnings
-4. **"No content received from Ollama"** - Ollama responses not working
-
-## Root Causes
-
-### 1. Wrong GDK Backend
-Your environment had `GDK_BACKEND=x11` set, which forces GTK to use XWayland instead of native Wayland. GTK4 Layer Shell **only works with native Wayland**, not XWayland.
-
-### 2. Initialization Order
-The layer shell was being initialized **after** window properties (title, size) were set. GTK4 Layer Shell must be initialized **immediately** after `super().__init__()`.
-
-### 3. Library Linking Order
-The GTK4 Layer Shell library needs to be loaded before `libwayland-client.so`, which requires using `LD_PRELOAD`.
-
-### 4. Missing Focus Event Handler
-The `Gtk.Entry` widget wasn't properly handling focus-out events, causing GTK to emit warnings.
-
-### 5. Virtual Environment Not Activated
-The launcher script wasn't activating the Python virtual environment (`.venv`), so the `ollama` package wasn't available even though it was installed in the venv.
-
-### 6. Ollama SDK API Change (Pydantic Objects)
-The newer `ollama` package (v0.6.0) returns Pydantic objects instead of dictionaries. The `OllamaClient` code was using `.get()` methods which don't work on Pydantic objects, causing responses to appear empty. This caused all Ollama API calls to return empty content with "No content received from Ollama".
-
-## Fixes Applied
-
-### 1. Reordered Initialization ([sidebar_window.py:26-41](sidebar_window.py#L26-L41))
-```python
-def __init__(self, **kwargs) -> None:
-    super().__init__(**kwargs)
-
-    # CRITICAL: Layer shell must be initialized BEFORE any window properties
-    self._setup_layer_shell()
-
-    self.set_default_size(360, 720)
-    self.set_title("Niri AI Sidebar")
-    # ... rest of initialization
-```
-
-### 2. Added Error Detection ([sidebar_window.py:44-65](sidebar_window.py#L44-L65))
-```python
-def _setup_layer_shell(self) -> None:
-    if Gtk4LayerShell is None:
-        return
-
-    Gtk4LayerShell.init_for_window(self)
-
-    # Verify initialization succeeded before configuring
-    if not Gtk4LayerShell.is_layer_window(self):
-        return
-
-    # ... rest of layer shell configuration
-```
-
-### 3. Added Focus Event Handler ([sidebar_window.py:110-113](sidebar_window.py#L110-L113), [sidebar_window.py:173-176](sidebar_window.py#L173-L176))
-```python
-# Add focus event controller to properly propagate focus-out events
-focus_controller = Gtk.EventControllerFocus()
-focus_controller.connect("leave", self._on_entry_focus_out)
-self._entry.add_controller(focus_controller)
-```
-
-### 4. Created Launcher Script ([run.sh](run.sh))
-```bash
-#!/bin/bash
-# Get the directory where this script is located
-SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
-
-# Activate virtual environment if it exists
-if [ -f "$SCRIPT_DIR/.venv/bin/activate" ]; then
-    source "$SCRIPT_DIR/.venv/bin/activate"
-fi
-
-# Force GTK to use native Wayland backend (not XWayland)
-export GDK_BACKEND=wayland
-
-# Preload GTK4 Layer Shell library to ensure proper initialization
-export LD_PRELOAD=/usr/lib/libgtk4-layer-shell.so
-
-# Run the application
-exec python3 "$SCRIPT_DIR/main.py" "$@"
-```
-
-**Key additions:**
-- Activates `.venv` if present (fixes Ollama integration)
-- Sets `GDK_BACKEND=wayland` (forces native Wayland)
-- Preloads GTK4 Layer Shell library (fixes linking order)
-
-### 5. Added Environment Detection ([main.py:41-50](main.py#L41-L50))
-Warns users if they're running with the wrong backend configuration.
-
-### 6. Fixed Ollama SDK Compatibility ([ollama_client.py:59-76](ollama_client.py#L59-L76), [94-109](ollama_client.py#L94-L109))
-Updated `OllamaClient` to handle both dictionary responses (old SDK) and Pydantic objects (new SDK v0.6.0+):
-
-```python
-# Handle both dict responses (old SDK) and Pydantic objects (new SDK)
-if isinstance(result, dict):
-    message = result.get("message")
-    role = message.get("role") or "assistant"
-    content = message.get("content") or ""
-else:
-    # Pydantic object (ollama SDK >= 0.4.0)
-    message = getattr(result, "message", None)
-    role = getattr(message, "role", "assistant")
-    content = getattr(message, "content", "")
-```
-
-This ensures compatibility with both old and new versions of the `ollama` Python package.
-
-## How to Run
-
-**Use the launcher script:**
-```bash
-./run.sh
-```
-
-**Or set environment variables manually:**
-```bash
-GDK_BACKEND=wayland LD_PRELOAD=/usr/lib/libgtk4-layer-shell.so python3 main.py
-```
-
-**Do NOT run directly with `python3 main.py`** if you have `GDK_BACKEND=x11` in your environment, as this will cause the layer shell initialization to fail.
-
-## Expected Behavior
-
-After these fixes:
-- ✅ No "Failed to initialize layer surface" warnings
-- ✅ No "GtkWindow is not a layer surface" warnings
-- ✅ Reduced "GtkText - did not receive a focus-out event" warnings (GTK4 internal issue, mostly mitigated)
-- ✅ Window properly anchored to the left edge of your screen
-- ✅ Window appears as a layer surface in Niri
-- ✅ Ollama integration working - receives and displays responses
-- ✅ Conversation history persisted properly
-
-## Testing
-
-Run the application with the launcher script and verify:
-1. Minimal warnings in the console output (only harmless Vulkan warnings may appear)
-2. Window appears on the left edge of the screen
-3. Window stays anchored when switching workspaces
-4. Text input works properly
-5. Ollama responses are received and displayed
-6. Conversations are saved and restored on restart
-
-### Quick Test
-```bash
-./run.sh
-# Type a message in the UI and press Enter
-# You should see a response from Ollama
-```
-
-## Troubleshooting
-
-### "No content received from Ollama" Error
-
-**Symptom:** The application displays "No content received from Ollama" or similar errors.
-
-**Causes:**
-1. The `ollama` Python package is not installed
-2. The virtual environment is not activated
-3. Ollama server is not running
-
-**Solutions:**
-```bash
-# Ensure Ollama is installed and running
-curl -s http://127.0.0.1:11434/api/tags
-
-# Install the ollama package in your venv
-source .venv/bin/activate
-pip install ollama
-
-# Always use the launcher script (it activates the venv)
-./run.sh
-```
-
-### Layer Shell Initialization Fails
-
-**Symptom:** "Failed to initialize layer surface" warning appears.
-
-**Causes:**
-1. `GDK_BACKEND=x11` is set (forces XWayland instead of native Wayland)
-2. GTK4 Layer Shell library not installed
-3. Not running on a Wayland compositor
-
-**Solutions:**
-```bash
-# Check your environment
-echo $GDK_BACKEND  # Should be empty or "wayland"
-echo $WAYLAND_DISPLAY  # Should show your Wayland display (e.g., "wayland-1")
-
-# Unset GDK_BACKEND if it's set to x11
-unset GDK_BACKEND
-
-# Install GTK4 Layer Shell (Arch Linux)
-sudo pacman -S gtk4-layer-shell
-
-# Use the launcher script (it sets the correct environment)
-./run.sh
-```
-
-## References
-
-- [GTK4 Layer Shell Documentation](https://github.com/wmww/gtk4-layer-shell)
-- [GTK4 Layer Shell Linking Guide](https://github.com/wmww/gtk4-layer-shell/blob/main/linking.md)
-- [Ollama Python Library](https://github.com/ollama/ollama-python)
diff --git a/aisidebar/README.md b/README.md
similarity index 91%
rename from aisidebar/README.md
rename to README.md
index 609457a..169044e 100644
--- a/aisidebar/README.md
+++ b/README.md
@@ -26,23 +26,23 @@ For **Niri**, add this to your `~/.config/niri/config.kdl`:
 binds {
     // ... your other bindings
 
-    // Toggle AI Sidebar with Super+A (or any key you prefer)
-    Mod+A { spawn "ignis" "run" "ignis.window_manager.WindowManager.get_default().toggle_window('AISidebar')"; }
+    // Toggle AI Sidebar with Super+G (or any key you prefer)
+    Mod+G { spawn "ignis" "run" "ignis.window_manager.WindowManager.get_default().toggle_window('AISidebar')"; }
 }
 ```
 
 For **Hyprland**, add this to your `~/.config/hypr/hyprland.conf`:
 
 ```conf
-# Toggle AI Sidebar with Super+A
-bind = SUPER, A, exec, ignis run "ignis.window_manager.WindowManager.get_default().toggle_window('AISidebar')"
+# Toggle AI Sidebar with Super+G
+bind = SUPER, G, exec, ignis run "ignis.window_manager.WindowManager.get_default().toggle_window('AISidebar')"
 ```
 
 For **Sway**, add this to your `~/.config/sway/config`:
 
 ```
-# Toggle AI Sidebar with Super+A
-bindsym $mod+A exec ignis run "ignis.window_manager.WindowManager.get_default().toggle_window('AISidebar')"
+# Toggle AI Sidebar with Super+G
+bindsym $mod+G exec ignis run "ignis.window_manager.WindowManager.get_default().toggle_window('AISidebar')"
 ```
 
 ### Requirements
diff --git a/aisidebar/__init__.py b/__init__.py
similarity index 100%
rename from aisidebar/__init__.py
rename to __init__.py
[GIT binary patch sections for the ten committed __pycache__/*.cpython-313.pyc files omitted: base85-encoded compiled bytecode, not human-readable; the patch text breaks off partway through __pycache__/ollama_client.cpython-313.pyc]
z&S}%M4`K}&1keNv%+jETF>~#vU0;SA*sk$M5O0xN9{;MRX&L{u|5NYH;En}hwYFoq zwqqsOx#aE43ocj9k06noI~p#{y*qcg?W4UP?ENsd+I(=i`QS=(&&m$qlv{fu7O_Y` zEcpQ0xoh6JR$Y7P_{HN3r!UhltJ~)X)`B}O9lUsO;pj?m_iFIq<=jWFfAIQMX{Ghx z{J=N<;CybavgT67#fpXTFDrN7kw~Z`?dQQ;3L<(J^j7>b*Bvajb8VFM_QD4&=);S z`gRX=^NU>^NPu7|%T8fNk;xFt(9~pX@a{u!u($(ZfdM)IW<>%0<6POEa@czc?XD=V z3vu2ql@tg$-K{!Xp{XB*ic*(j3|9sVBSrhlj$ybb*zJwrd2CBiWjj@MsqRNP)uVb3 z3A#8|#LBp+R;WIPvCgu#iq=}GLcmz(2%ZwQ1RF<3LR%8x)lRc}p-ZYCFt(TxPD=2z zu~Q+>N!uZs8Z4G~#a;Gjj$3n3U6=#cwk1O)1lQg1?eo;A)pieb&*|S;^*sa+)gm4~ z6a~kCGp)ez5YSPVRbkWDb81yw2KEbd@z^S&qgtuUeKJ@T>^Z>|v2w>;=iuvF3Xbqf zQaKB!omUVP!4A$bkBE|-Or~`sSFo0&rYmU}fNx1Xw{gHi5&L*dXLTLQMimu+$S%NK zFFi`u^1Cow8;?OW3+@vMjz&3f+#CfP_XvEFDFZMY$w|#XBN;GqnhryB!RE#;ZHTkE ziMerT}dLYv6K@`C2gdRgtJPnjcfF6g0h?jvs8zef4IdNP}X(o9d38jn$oEDRrJotp@ zOPI6RHv^cH6sVxY2d04Iy4k@L#i)~z+Zf=O^%t-VUa)y@)$U$vQ16I*wJ%Tj3SZt! zY8r0VHZGk0jc=`{;f^E(-Sg7-KH?3oh1)JXy$t68p~~e0qCgz)x!7}|b0NJ{*>*Ej zcj<+9U$`u-gxXd^vE@(Mcn{NUt=r&e2fmRovOTAo;`>%HJCr<0C+JqgxbYQNaN@cc@kbzb_~ zU$fTOdiBV1@bM+@S}zc=gd;W&)E2UhvV}@UV?lZ1{w?h&?gVT z!KbHZfa9-sqrS+w2mBIp3Fv+%1NDPWezDU($nlGZInY1lM3DDU%5L<9Tn0+n4d&2; z%SGK|~(d|Jzl8{nL0Ub7@ck3Lcb5D_!jv#)@A8_iv1f@Y_g0bi=u z_K-x~gNW$ou`Y0lV`a*8^1NM<(prG;R_k)0wZKKK$^@2RM}ZtRWJKNcSr{A&b4Esi zsfvIKCU;iX;Buf0m*bGh6mxK}c!gm!{V5iG4#jB{&!cz&#Tbe+DDXT%Uj}iM{tVRw z2&lB!4ofpd4KETNLNN3!7EYlkaSNlB;}B>1htyNU1MI}XTuj@MO~kz&&(LwKv+SgS z7Xqf3nwo`=M30hzyj5Zr%cJSBrUaazKrfrm8)YeffyG|}mbp#9%dnMa)@mD(QGDML z0jS1JDuUp9DdU@Z@P#5vAmvn+Q;yvFr%{ftoN{bdS$C41doB!o-LUJcS|u-dL*biB z%SZ0Nb6@HH`@6Q3uL)UUc*H?|< zJ3MLZzEcNP-SYsmELFAt_uWTGqq5o%TW*N06Gtfg?@+x6&y9PnHupp~A6w)Ha}SK! zTAjt6_lU?C2y*|Afp*U?4h;IZ-vbQ=$#t$~z`~Kgpr~6zYo(_dunz=;a~jq91cERx&+UKj`5X_f{7e%g24{6d~qQ4-a`# zfLX;LfyN_y{T8PELCocY!;g=2IX1D-zD@2SVXRo|cFrGk-Pl+?f*WE>00SP{$AHJJ zN4dBI+(dAMIpr!Hx=tTK7q>0~YoL#o1vuC^RjF{WDfIx2t1W>YY>Ee>Qlfo$`7zET z!=cLfD{;ibTq=`t5$ywZdt*iCO1A^p1KxCL46o8yP+f5e$Jri>xTHagy3{9?P(0$E zYdx8K1-;_dO(}Sy@um~a=CZpoytCPgbPVK}=LGqMA>Hs?W#Oij-A8KUJqnn&3MM1- zF@#0rhP~1`RkP~(686K zkF-hGTX@vlnCz(R_w&~e`}^Je4Yvq7a9zizonbP*=#mrCm=3GIJj1$LzwRk6j)-;g>I=asP_!a@DYeHd3a&R zpN(4k3VsZ4q2aNj>xsstN210T~4xrU*d9Oh=#;BnK^@EAJh!7}At0s8@BB7O>vT0(Kd(jW@l zur!3?5fotQ!^wDt;zfS$JUxhUy(rL=M<-Ftpg4yDO~$P^(mOE@1=en?fq0vICG>tT zIK}YYfF!m8?>c?!4u{yh&V!am4VE{?@#5Y>9BALWcZiMoCrNwsj;CAPhxZ`Cyo8!e zf*pAmYHku}&9hW5aqs%B0^@X!q_|1$4nN3=zO~whyrr~0RZxZ!mU7D#{!YX^dANx@ z#+~5ue6@ItVziku-{T{xhn37n3N|dtxIyH4~xFh~s-Kxbp|0K~clBQR2)Fu>t|w0?V6G8QZX7z|Ogsl5N>? zi;a*k&;8`R=i_&N*Nt>KN#Oa_vH#3{o+jjTtTdlE7lg*!AUq&Cp+pz-*k$2TjK)-y z#mnNQIE^zoep$Mdpb3yAJ#o3~l1ycl^pjztckL&-d_2Ziquu3hEm^yG#dGbzqiPBM z0%{iS+HP6B>Nw`Sshamp+c9Tt#|{=$(=Dp57ucl*9O1dDADDrq7UoR1Z26O~a9K?V zQ>F`}%)s_sAFA?M6V{q_ERf?D-nM6734ENwzj4;FEjK9eJ@HcHFQM@p5FU_QgbHxv zm@ZIJkI}f^qi1w+Ke-dXBTz}t>T#550wjq^UHUFP!6X?ZI5hNj>$`QCNlCp|?`Bf! 
zmIAFwl#fT>r)g;G(^H$;dUSSSThzkKqZGIcoPS8%AP#dA`26u$Dr0)jnyJwf<6)Y zU^aisF)d#Eeewt33%2RlHA@ZVtj+eH=Z-R&Oit6XKb2JBDKP6zMaSi~RBabL!!5uP zo*GT!gXgTmUBh+*i{3LG!?y~aTlA;Yi{5?JD+QK|6ACzJ^M}WaR>`b7!IXOJh_5B> zzrl%S&TEOV%i!M+cN=Wl`r7k;m^4}{4pW9>`hme57Ak=~Z+X?gu!~T!eS=nA7kpY1 z!?Jr%+GoAR=#4pck+{Pn37%2+hy1G^YdP@>R5i>w0*m1 z&6+gtv4}F7N~|n|vH|9`|}pKl{ttG zwq5TvT%+x|AsmlfQ6*)eENHRX`^o8IQ8il*k4B!gIFi^_QN2G0hTjAOK zspqS10l@_v)C|gZ)l%(Jb2-O4q@Fh&pS1xfX~`KaMtfk-VZy9b zEVuY5Mt5UXZutr-Nmc0{Y*ox+vE@bKXlE;J90&R${_9YHrFw|c_vx8W&OA%Kv7%%@ zxcc7JWo5Xo3_m>aTsgEV5@mw+!^r=^GBE4^w#@i7Yu>vDQLa{~b|heYnTUS;M-U!#@YHeW!^eb~Wk{R=e7pfl@NJmi{xtJ}N^as8*mEt>l-A$FIA30` zHzjD@7;RB=Gxcr)UO^LUuj_$HgC}%?0z|rV*v9~R+eS8E~`uO z@D~HQ<$;N(0~27pW#wR9Imm>Ox-!y`#K9y(qOBH0?K2BL1mWu}Slr%%+vs{0jQ$O5 zx6RCT1k(kuRLk2Ko{6;)J@6`DYpU~FoSBNLpPMG_dsSMnj3|bf-)}HT+=q3;p`GmE zRlWl{^3Os6=E(FeXL9gk<^Y^W+-D}Lrv~b&vA?1fhMp=z9|z01d_9+6QjV@YB1l8OOrB-TqEMw22xl`zv{Ty_==qpO7-5JQZV`yjH7lr6H{w1l*&t)_+`x7r z@l-5odqumzIgH=TJun1Ko4l?`RDm6ayQ1iYiLS+RS*QRDiGta&XoipCWV?l_KIATz ztkTgLIG@>w!*m2^9`5Wz+u#OzlmBBV7_;b3f1tbvF+EzBN0;RBmHxp`l~0uA{-gE& zql@X)1Ocx?_k!QShr#mLoAt3bAD?<60D}K9(xevyRH(QhzqF3x8$71X8oC8Aa ziqqniJW^wnNWEuIt@zGd;(S@fpwM^wYX$QH~5&QNzPwl+(7u(WB59j&(lOrqD0q z3U6V7EE328Qd^$&5ooyj*k|zcGn(*fh>z>V-YzWqpa@0q!Z1M@xP@`tG592UVr}nP zaA3T>-@J(o`}QFfdvDY4;@oK{0F&C~FpfiE1?JG`gj3*;>f4SDRy z73u*z2MZK4DA)2Hg8`^rFw7vJ_G~q-IFg3LWU{z6!;vC)F{*nxQMut8kWB*9p76kPxf+btwzoG_do&W#< literal 0 HcmV?d00001 diff --git a/__pycache__/reasoning_controller.cpython-313.pyc b/__pycache__/reasoning_controller.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fcdfd88fa76e04f2fe744a15915775e9f65a3720 GIT binary patch literal 5779 zcmbVQO>7&-6`m!R*?JGc8NHrYdTZU)167}G1Ms%A)rqN#c`WHa4y$&g}cN!LxMq{o$*0hQRx zl97T!_8jC*`&o4P@2fqpXxJmH0$4p-|mOE2^w&Q0x-LxE2#d+6*H% zp}1zL;cW}NrC@2q><~pAA&7~RVNhj0YsflOXoa#3_$TJrP)b{rhcb%##!v*HPAJJC zC8;XV5mCds!c>POBAi!61a|f5X8^O-`S&u-UF*_$8}?kJy76-*DhW^ zy~YL$f$nSPSC0LK?WW!3W?K+187i|e55-#%ui{Ms?`{HEw+PN|@&*eyx1gbe<`#Sc zXK5b6FMxr=JFgHBJeJlZvm>_{MX!LwT%o34tkwgDrDZc9rZp*E^I8WgRx9b~sQ~SS z=_ztWgl=?PXP&2fuz#Sl0`mlp_B1f7NVyd2>K8#=Bb!dLtNYe#H(q;h?(V5v$B}zK z&UKt#@f5h$JlC1yI`6jsfn7!Ic!58>8XnE@qbuxVZtuF2?A@R58P4?#uk}2?!hXti zS(Msh9B3$(aYR75iIBmwJ~c9wAb9ZQ_;^Z=E$~XB1}uvJiq1=5@K@xB$1^P(S*q%% zJv^itnw1;ZFFDC0`~@kk%RG=nrdJhR_T0BobkCM6yU)-8&~+SkX+b@TUUsl%Z=)rx zmLJ5XyFgqc>rKSpp7-%NAD{OP=X}Gf-0=3*LEx;_`86otW_56omt@M_uGo!PRr**5T6l5#0{u>uscuMvn_>x{s9=phgmn1=@Q{Uf>l zk$nFI{H+*yUtiAGx5~}`*~nj-&s~}abvq|cM; zkn^h9MWh@?&!x`(Ihy?C9ZXM0DY9uBkbx>lI8lhk&4d3N%ThI@r9J_|g0sn=_x9wx zJ$Y{g{$Tgv4-~rg7P|Hnx_S!xf?s)^?JeuX+1#?;ZWZ-3DB2D;Mi3Uiem*6FRBD?a-mB064j@~W-+ z-C5f-hU9e#4Y7R6+M$NC+}jT2V8G=b1otVsZWBECE5GAOknAT%NM$u(YzZ4r_7NU_ zNzeD~`$&S>Pikg`hlIK`=P$o_-sU0_%!F^Srn+q(d@VtWeZq+``!d8#HTw>*hiZ$s zZ!GX|VvJE4{a@(-q}P+^a@}c+S0GfDAmA$ogBXvYv!vw?_$yL6E9)MUq2D;NMk0Lj zJ_Dhj8q?wsEL&m3h3W~2`d~`1Zy_qxx6zQBVzgj7t&mZ-PA?{_Wn|T4FX^Ps;ZpRR4E@XJDb=Xyx+GW%bc^e4G6|E#v#pM13PF$+3DSNnYDlzgF($e?{f{6j zflDyw3+8=;Ip1L37tZ;@PudQwwH;h-I=Iey-2H_>M?TP>3-sp$gSo)qz3~T!R#6-K zNP4n+&#k2!OLqhBy>*>^vine>|L}*Y4^pc`V;`Mg>z^$2^?x|8z<2z?oIm*6SL2T~+FRg)m0t0bC z?i>9mjQV%oV@Juyj!OO6X9MT!s4Xjl&jeHp!|E8=olYF-3`nnkjnoVgO@t)r*HiQO_n4 z%A52hEW^DCsF#JF9f@>SPnkAN2UTBEW2O@ibgE{$sGLblF_~V3+7J;TW$7hs>BjBW zI``2i$)AxcMs;e2iMw;tek_*vTJX? 
zGo0%T=Q~g3I!`_5U+X-(G69z%w|d|0y=%XhyxyA&oOrPRiNF1p`-b~=@ZSCh2S4$j zDfk1oT<^Mm;eO(8y~W+&ZU^4)ywmxs?#KS%GZ$$c*=!=szMoINGkJX}-#3!$8~L>9 z<^mWuE4V27 z-o2PbfxYY4(@)7~>cEpCMc~b{TK7vG?f4V literal 0 HcmV?d00001 diff --git a/__pycache__/streaming_handler.cpython-313.pyc b/__pycache__/streaming_handler.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..79c9e602a277023b184e13665c8c4b8089f93b2a GIT binary patch literal 9532 zcmeHNU2GfIl^*_TL=Gj|mSjl|nV}q8A}vvgZADwfiLAz!qgX5395GR9r^JXHQe%@d z%pEd`g#yjKEVMz2)C&Yo(1&PWcICwan*a+WyC~4)W%H^DWsn_g3j;+T{AScz6ezGj z&$;tMQliopc}sUp-?_hM?wothcfWH-k3*pV1J~vI|F`x7o?(821^e=v8Q0_UkokZS z7{UlH!F}2FvYWVh%zG|-UiK0%&3iBVUiK3|&kQmnjNlt)1pk!Vlt8))U2%5(dqRas zVo6pCJdw1rs%R3wCMvn2MEJZ)_%cxo(4fh0N_<7VB`JJP*2+ckcJd+0iMx!Ts3>Yh ztjI7Hir!0$@+#cB-jZ_#siI}g$C;_eyty9#BV;~cZZO0JZ{QYO#3Q(gH_8a%D1-lm zLxOjhx#_*>B0k|T&itmA_=O{apXRzC*F|$Iw6ZkU4Y_Wb3kZEefaZcw7o<5(=odJe z3kd^4h~|2Pqe2hO^$NWtT=2vPjj*GSmrN}@?1rhX&;G5bhy_XGwf*X{6vDqHSJq4& zO|0IYFqKs;=fsMnC6}^3^VMmJO8}~HJ>G!K2h2*i=rpc8U3A%nmGebwu^48BFIviD zV3E^I(J}<9N`*?%vP3O{ zv6Soy_hq(3H_uAIivD#!;xjS^YB_EL+@Xcr~e*H+tGv$Y7lb^zNN2=TyK{H8dt% zVcwXXTvJQZWLZ{jP9{NndATqt7Zh2WEUCF_QPL(wS(9_psz@fC>11qcDc?5OOhyKS z%4Cco!S)=>v66NZ2anShAbE#*)W-~-e2;7JlRIpr5sB}x!;PWXdp(V#LmOVKAKCEI zmM8H(+&DRg_v4KStsibY5!vwGXNMkxFedBx5fmJcLr(+PL=VDF^r9uFH}x6E?`Wy% zdFVk+&jSxiQK+^hnfIpup;xUR>*!J%K&F%=IDC1%XATe+YLv7^W+#da3FyyVqCeaye`314J|Z zl!v{+2DkJA|E4VRxa}on*f%p`F4qQ>)N@UIGdBhB6x?q?npiGNN-nSz_nT{rOKteg zTEkP66w;41p-jG5)z&O8zvrf_ZuqD)4fV&{t8Wf9L8Qa#w>F=GaT=}=xE;rY`|7~Nip>1=HC#11>Q>u0#$jS6(pdtRj|b$3OBg+~54Qn8xeWG^~xNX(O6c772K_Wh@68L;Jvkt%Cag30Ns|5)*0?1C~iN zftzkA1l_C6B>L?&xA#kGHO9$($k4Ihb0|)L@(4129)Jk$l!MMF!o0w> z?QF~4h6M#+=XqDk!SQC7v}+=S`fKtoivLcB>khN(1S}&6DINBMmQe`adoIEAJvV|) z+`IndJi0kAX0z2&wFnku0gr}QkzPnH(e2`%%V5hyR*77>4I2f;4DU@gE}iX`c+6QGf32X6Xr(JG=sDH(AnCdDQX6FHu5za*&NW> z(fh#jEEe~dSOWSyMT!}~k3WSJ;&r;&cOV)z84+JTsE6P}O?1YSCQ3lNgACB!x*X}i z%sTf-Iq{c02G-kc;}j2T-x*PQfWe-@whnmGHmlyITszl7aFCt8k)|Ok1WA$|ciB-K zgtOTyArPwBw8sjdU`I!m$Bhl&s|BEbe?@AQ}%pkyjOIq9T_hJ}VZB zt77(6VBG^0cli&&7S1~#X9)nWWuQFfdLS`^npDZW2_eQRgou<8Q}%nvilu<(K=@Xv zs3j7E?kzl+nznIlzx_I3DB*y&Ac38&hXDo?Re3cVmsVu!+|+qOyLndJW>xQ^>A{lvJ*ZF%!xm>b@t!S-8;9H{M@r0IcKtE2vsan z6$R|uVfhEWecym?TQIAGY`W8KXRj#iYc9v;!1vRx&O|!p&}Vp%b=fjtOb45^TPI+5 zYg(s8kRt+(;c@-Zh;ZN1Dt4?5v5b$P6&k&8CXjhk%-yVNl@i#L(M=auELJ6>8{4Wt zhKUivY#9$0XfSJW-9U3(8%)ij-}f+{xh}y zsg1y|gZ&L|pw96+$8QeTV^ex;sur96JW-!X=`*R?%(5O^zR#tRwhZiFH`FG@qW_{; z)E--ckA4&GfMO5gBEW1qtW#%s_L_8LFEA_fg0ZZfZ{8h?vFFl(!5lc34)XTQ#R=so zlfu1+UN9FYZ`UylGP6z$N^xIzkS>MDu;-%83-MvkwGwL?heT=51$(QPXI5fo-A^&V zzt-kuI6!JY`oc($P_P4&E|rTCpxXg3xLk#1Ud{7k&i-ii9Op}Lpp>yn@xVMKa_0UV zXoh|!#L=Ca?%6c_M>Wyy8jgAL*8G6X(S1AaF+;%+ZLsAVJ!a9Kg@(H&9OsxP1lU)G zU{a&!F&!9QTl*KFZI zIJ(`N+-8%egW@aO7v`yh?n34uV0GN!MFdW#bPkSQ!<~YO<}qz&Q+6bnn|}vm@8sS> z%P9vO*SdR{GjQb8$!*)7|IOzZ9x&~%t!*AoE9f~jwGi8oX_Z>7w4C@_JGn~QqgRV< znx`424BN_VVf1v?vh>z|d%cJD`!+mcnLH2cIS*$ilsfQGpsazsfH}kqg4=`bBdk49 zqsy9In_mz-f$VT((&}?}p^J6{5^#nAhco>1N+WUlFChriTk-A4S%(@xTvTT*V$iUO zLF=Z0)RNloze0+Km+-xZQQU$@@uWM3zbnr`eFuw!+72{z0=Ahckc^k&O?w^7h5a(^ z?L-FzuMP6wbb_Y%4jAqGvqt+@UO`wE{7TmWZ~4Y|=qZ+YiC z^p@_lixP@%@onHbXlxaBnZ6tqI^r52~64vHh%m?Do#g_OBU@5DKoCNg){j{V8cQZ-W2ZAb zcXr0?`=b|CB@5?$@JTT~V?878H{3)r{F)?^?3&@faAkRJanT4YTw1!4y7=OpaMAD; zh*~WhKA>-M8Qu~<0FkShq#-dv)CM4cqGn?FVAViSx1(u;Yh|KF*xnSK5r>?vPSxRk zRi`x!5^#0Q%Vd^E;n-V~GR@wW4H#ql0<;5q{u@Zu-h)+nhj|=jy75!Q&@^zs@Zr1N zce+2E_+;d>_^0vDSAS9Z&ys%naxJ{L;rn$k+!%~}9Q-J_Irp=k`rx=eI9?k}ZY(s8 z@gG0;(Q}(`{OpDLu}S^dWbN298;gy;$o7%(ZEhT7KXKl*%??`9w}%s-q&|D?)7QQ@ 
diff --git a/aisidebar/aisidebar.py b/aisidebar.py
similarity index 100%
rename from aisidebar/aisidebar.py
rename to aisidebar.py
diff --git a/aisidebar/chat_widget.py b/aisidebar/chat_widget.py
deleted file mode 100644
index 0ecd57f..0000000
--- a/aisidebar/chat_widget.py
+++ /dev/null
@@ -1,192 +0,0 @@
-import threading
-from ignis import widgets, app
-from gi.repository import GLib
-from .ollama_client import OllamaClient
-from .conversation_manager import ConversationManager
-
-
-class ChatWidget(widgets.Box):
-    """Chat interface widget with Ollama integration"""
-
-    def __init__(self):
-        self._conversation_manager = ConversationManager()
-        self._ollama_client = OllamaClient()
-        self._current_model = self._ollama_client.default_model
-
-        # Header with title and model
-        header_title = widgets.Label(
-            label="AI Sidebar",
-            halign="start",
-            css_classes=["title-2"],
-        )
-
-        model_name = self._current_model or "No local model detected"
-
-        self._model_label = widgets.Label(
-            label=f"Model: {model_name}",
-            halign="start",
-            css_classes=["dim-label"],
-        )
-
-        header_box = widgets.Box(
-            vertical=True,
-            spacing=4,
-            child=[header_title, self._model_label],
-        )
-
-        # Message list
-        self._message_list = widgets.Box(
-            vertical=True,
-            spacing=8,
-            hexpand=True,
-            vexpand=True,
-            valign="start",
-        )
-
-        # Scrolled window for messages
-        self._scroller = widgets.Scroll(
-            hexpand=True,
-            vexpand=True,
-            min_content_height=300,
-            child=self._message_list,
-        )
-
-        # Input entry
-        self._entry = widgets.Entry(
-            hexpand=True,
-            placeholder_text="Ask a question…",
-            on_accept=lambda x: self._on_submit(),
-        )
-
-        # Send button
-        self._send_button = widgets.Button(
-            label="Send",
-            on_click=lambda x: self._on_submit(),
-        )
-
-        # Input box
-        input_box = widgets.Box(
-            spacing=8,
-            hexpand=True,
-            child=[self._entry, self._send_button],
-        )
-
-        # Main container
-        super().__init__(
-            vertical=True,
-            spacing=12,
-            hexpand=True,
-            vexpand=True,
-            child=[header_box, self._scroller, input_box],
-            css_classes=["ai-sidebar-content"],
-        )
-
-        # Set margins
-        self.set_margin_top(16)
-        self.set_margin_bottom(16)
-        self.set_margin_start(16)
-        self.set_margin_end(16)
-
-        # Load initial messages
-        self._populate_initial_messages()
-
-    def _populate_initial_messages(self):
-        """Load conversation history"""
-        for message in self._conversation_manager.messages:
-            self._append_message(message["role"], message["content"], persist=False)
-
-        if not self._conversation_manager.messages:
-            self._append_message(
-                "assistant",
-                "Welcome! 
Ask a question to start a conversation.", - persist=True, - ) - - def _append_message(self, role: str, content: str, *, persist: bool = True): - """Add a message bubble to the chat""" - label_prefix = "You" if role == "user" else "Assistant" - - message_label = widgets.Label( - label=f"{label_prefix}: {content}", - halign="start", - xalign=0.0, - wrap=True, - wrap_mode="word_char", # Fixed: use underscore not hyphen - justify="left", - ) - - self._message_list.append(message_label) - self._scroll_to_bottom() - - if persist and self._conversation_manager: - self._conversation_manager.append_message(role, content) - - def _scroll_to_bottom(self): - """Scroll to the latest message""" - def _scroll(): - adjustment = self._scroller.get_vadjustment() - if adjustment: - adjustment.set_value(adjustment.get_upper() - adjustment.get_page_size()) - return False - - GLib.idle_add(_scroll) - - def _set_input_enabled(self, enabled: bool): - """Enable/disable input controls""" - self._entry.set_sensitive(enabled) - self._send_button.set_sensitive(enabled) - - def _on_submit(self): - """Handle message submission""" - text = self._entry.text.strip() - if not text: - return - - self._entry.text = "" - self._append_message("user", text, persist=True) - self._request_response() - - def _request_response(self): - """Request AI response in background thread""" - model = self._current_model or self._ollama_client.default_model - if not model: - self._append_message( - "assistant", - "No Ollama models are available. Install a model to continue.", - persist=True, - ) - return - - history = self._conversation_manager.chat_messages - self._set_input_enabled(False) - - def _worker(messages): - response = self._ollama_client.chat(model=model, messages=list(messages)) - GLib.idle_add(self._handle_response, response, priority=GLib.PRIORITY_DEFAULT) - - thread = threading.Thread(target=_worker, args=(history,), daemon=True) - thread.start() - - def _handle_response(self, response): - """Handle AI response""" - self._set_input_enabled(True) - - if not response: - self._append_message( - "assistant", - "The model returned an empty response.", - persist=True, - ) - return False - - role = response.get("role", "assistant") - content = response.get("content") or "" - if not content: - content = "[No content received from Ollama]" - - self._append_message(role, content, persist=True) - return False - - def focus_input(self): - """Focus the input entry""" - self._entry.grab_focus() diff --git a/aisidebar/conversation_manager.py b/aisidebar/conversation_manager.py deleted file mode 100644 index 7e0b365..0000000 --- a/aisidebar/conversation_manager.py +++ /dev/null @@ -1,173 +0,0 @@ -"""Conversation state management and persistence helpers.""" - -from __future__ import annotations - -import json -import os -import tempfile -from dataclasses import dataclass, field -from datetime import datetime, timezone -from pathlib import Path -from typing import ClassVar, Dict, Iterable, List, MutableMapping - -DEFAULT_CONVERSATION_ID = "default" - - -@dataclass -class ConversationState: - """In-memory representation of a conversation transcript.""" - - conversation_id: str - created_at: str - updated_at: str - messages: List[Dict[str, str]] = field(default_factory=list) - - -class ConversationManager: - """Load and persist conversation transcripts as JSON files.""" - - VALID_ROLES: ClassVar[set[str]] = {"system", "user", "assistant"} - - def __init__( - self, - storage_dir: str | Path | None = None, - conversation_id: str | None = None, - ) 
-> None: - module_root = Path(__file__).resolve().parent - default_storage = module_root / "data" / "conversations" - self._storage_dir = Path(storage_dir) if storage_dir else default_storage - self._storage_dir.mkdir(parents=True, exist_ok=True) - - self._conversation_id = conversation_id or DEFAULT_CONVERSATION_ID - self._path = self._storage_dir / f"{self._conversation_id}.json" - - self._state = self._load_state() - - # ------------------------------------------------------------------ properties - @property - def conversation_id(self) -> str: - return self._state.conversation_id - - @property - def messages(self) -> List[Dict[str, str]]: - return list(self._state.messages) - - @property - def chat_messages(self) -> List[Dict[str, str]]: - """Return messages formatted for the Ollama chat API.""" - return [ - {"role": msg["role"], "content": msg["content"]} - for msg in self._state.messages - ] - - # ------------------------------------------------------------------ public API - def append_message(self, role: str, content: str) -> Dict[str, str]: - """Append a new message and persist the updated transcript.""" - normalized_role = role.lower() - if normalized_role not in self.VALID_ROLES: - raise ValueError(f"Invalid role '{role}'. Expected one of {self.VALID_ROLES}.") - - timestamp = datetime.now(timezone.utc).isoformat() - message = { - "role": normalized_role, - "content": content, - "timestamp": timestamp, - } - - self._state.messages.append(message) - self._state.updated_at = timestamp - self._write_state() - return message - - def replace_messages(self, messages: Iterable[Dict[str, str]]) -> None: - """Replace the transcript contents. Useful for loading fixtures.""" - normalized: List[Dict[str, str]] = [] - for item in messages: - role = item.get("role", "").lower() - content = item.get("content", "") - if role not in self.VALID_ROLES: - continue - normalized.append( - { - "role": role, - "content": content, - "timestamp": item.get("timestamp") - or datetime.now(timezone.utc).isoformat(), - } - ) - - now = datetime.now(timezone.utc).isoformat() - self._state.messages = normalized - self._state.created_at = self._state.created_at or now - self._state.updated_at = now - self._write_state() - - # ------------------------------------------------------------------ persistence - def _load_state(self) -> ConversationState: - """Load the transcript from disk or create a fresh default.""" - if self._path.exists(): - try: - with self._path.open("r", encoding="utf-8") as fh: - payload = json.load(fh) - return self._state_from_payload(payload) - except (json.JSONDecodeError, OSError): - pass - - timestamp = datetime.now(timezone.utc).isoformat() - return ConversationState( - conversation_id=self._conversation_id, - created_at=timestamp, - updated_at=timestamp, - messages=[], - ) - - def _state_from_payload(self, payload: MutableMapping[str, object]) -> ConversationState: - """Normalize persisted data into ConversationState instances.""" - conversation_id = str(payload.get("id") or self._conversation_id) - created_at = str(payload.get("created_at") or datetime.now(timezone.utc).isoformat()) - updated_at = str(payload.get("updated_at") or created_at) - - messages_payload = payload.get("messages", []) - messages: List[Dict[str, str]] = [] - if isinstance(messages_payload, list): - for item in messages_payload: - if not isinstance(item, dict): - continue - role = str(item.get("role", "")).lower() - content = str(item.get("content", "")) - if role not in self.VALID_ROLES: - continue - timestamp = 
str( - item.get("timestamp") or datetime.now(timezone.utc).isoformat() - ) - messages.append({"role": role, "content": content, "timestamp": timestamp}) - - return ConversationState( - conversation_id=conversation_id, - created_at=created_at, - updated_at=updated_at, - messages=messages, - ) - - def _write_state(self) -> None: - """Persist the conversation state atomically.""" - payload = { - "id": self._state.conversation_id, - "created_at": self._state.created_at, - "updated_at": self._state.updated_at, - "messages": self._state.messages, - } - - with tempfile.NamedTemporaryFile( - "w", - encoding="utf-8", - dir=self._storage_dir, - delete=False, - prefix=f"{self._conversation_id}.", - suffix=".tmp", - ) as tmp_file: - json.dump(payload, tmp_file, indent=2, ensure_ascii=False) - tmp_file.flush() - os.fsync(tmp_file.fileno()) - - os.replace(tmp_file.name, self._path) diff --git a/aisidebar/ollama_client.py b/aisidebar/ollama_client.py deleted file mode 100644 index 6615977..0000000 --- a/aisidebar/ollama_client.py +++ /dev/null @@ -1,130 +0,0 @@ -"""Client utilities for interacting with the Ollama API via direct HTTP calls.""" - -from __future__ import annotations - -import json -from typing import Any, Dict, Iterable, Iterator -from urllib.request import Request, urlopen -from urllib.error import URLError, HTTPError - - -class OllamaClientError(RuntimeError): - """Base exception raised when Ollama operations fail.""" - - -class OllamaUnavailableError(OllamaClientError): - """Raised when the Ollama server is not available.""" - - -class OllamaClient: - """HTTP client for interacting with Ollama's REST API.""" - - def __init__(self, host: str | None = None) -> None: - self._host = host or "http://localhost:11434" - self._cached_models: list[str] | None = None - - # ------------------------------------------------------------------ helpers - @property - def is_available(self) -> bool: - """Check if Ollama server is reachable.""" - try: - req = Request(f"{self._host}/api/tags", method="GET") - with urlopen(req, timeout=2) as response: - return response.status == 200 - except (URLError, HTTPError, TimeoutError): - return False - - @property - def default_model(self) -> str | None: - """Get the first available model.""" - models = self.list_models() - return models[0] if models else None - - def list_models(self, force_refresh: bool = False) -> list[str]: - """Return the available model names, caching the result for quick reuse.""" - if self._cached_models is not None and not force_refresh: - return list(self._cached_models) - - try: - req = Request(f"{self._host}/api/tags", method="GET") - with urlopen(req, timeout=5) as response: - data = json.loads(response.read().decode()) - except (URLError, HTTPError, TimeoutError) as exc: - raise OllamaClientError(f"Failed to list models: {exc}") from exc - - models: list[str] = [] - for item in data.get("models", []): - name = item.get("name") or item.get("model") - if name: - models.append(name) - - self._cached_models = models - return list(models) - - # ------------------------------------------------------------------ chat APIs - def chat( - self, - *, - model: str, - messages: Iterable[Dict[str, str]], - ) -> dict[str, str] | None: - """Execute a blocking chat call against Ollama.""" - payload = { - "model": model, - "messages": list(messages), - "stream": False, - } - - try: - req = Request( - f"{self._host}/api/chat", - data=json.dumps(payload).encode("utf-8"), - headers={"Content-Type": "application/json"}, - method="POST", - ) - with 
urlopen(req, timeout=120) as response: - result = json.loads(response.read().decode()) - except (URLError, HTTPError, TimeoutError) as exc: - return { - "role": "assistant", - "content": f"Unable to reach Ollama: {exc}", - } - - # Parse the response - message = result.get("message") - if not message: - return {"role": "assistant", "content": ""} - - role = message.get("role", "assistant") - content = message.get("content", "") - - return {"role": role, "content": content} - - def stream_chat( - self, *, model: str, messages: Iterable[Dict[str, str]] - ) -> Iterator[dict[str, Any]]: - """Placeholder for streaming API - not yet implemented.""" - raise NotImplementedError("Streaming chat is not yet implemented") - - # ------------------------------------------------------------------ internals - def _make_request( - self, endpoint: str, method: str = "GET", data: dict | None = None - ) -> dict: - """Make an HTTP request to the Ollama API.""" - url = f"{self._host}{endpoint}" - - if data: - req = Request( - url, - data=json.dumps(data).encode("utf-8"), - headers={"Content-Type": "application/json"}, - method=method, - ) - else: - req = Request(url, method=method) - - try: - with urlopen(req, timeout=30) as response: - return json.loads(response.read().decode()) - except (URLError, HTTPError) as exc: - raise OllamaClientError(f"Request failed: {exc}") from exc diff --git a/chat_widget.py b/chat_widget.py new file mode 100644 index 0000000..955cb4a --- /dev/null +++ b/chat_widget.py @@ -0,0 +1,1072 @@ +import threading +import os +from ignis import widgets, app +from gi.repository import GLib, Gtk, Gdk +from .ollama_client import OllamaClient +from .conversation_manager import ConversationManager +from .conversation_archive import ConversationArchive +from .ollama_monitor import OllamaAvailabilityMonitor +from .streaming_handler import StreamingHandler +from .command_processor import CommandProcessor, CommandResult +from .reasoning_controller import ReasoningController + + +class ChatWidget(widgets.Box): + """Chat interface widget with Ollama integration""" + + def __init__(self): + # Load CSS for TextView styling + self._load_css() + self._conversation_manager = ConversationManager() + self._conversation_archive = ConversationArchive() + self._ollama_client = OllamaClient() + self._current_model = self._ollama_client.default_model + + # Initialize availability monitor + self._ollama_monitor = OllamaAvailabilityMonitor(self._ollama_client) + self._ollama_monitor.add_callback(self._on_ollama_availability_changed) + + # Initialize command processor + self._command_processor = CommandProcessor() + self._register_commands() + + # Initialize reasoning controller + self._reasoning_controller = ReasoningController() + + # Set initial model based on reasoning preference + if self._ollama_client.is_available: + self._current_model = self._reasoning_controller.get_model_name() + + # Header with title and model + header_title = widgets.Label( + label="AI Sidebar", + halign="start", + css_classes=["title-2"], + ) + + # Display connection status if Ollama unavailable at startup + if not self._ollama_client.is_available: + model_name = "Ollama not running" + else: + model_name = self._current_model or "No local model detected" + + self._model_label = widgets.Label( + label=f"Model: {model_name}", + halign="start", + css_classes=["dim-label"], + ) + + # Reasoning mode toggle button (using regular button with state tracking) + self._reasoning_enabled = self._reasoning_controller.is_enabled() + toggle_label = "🧠 
Reasoning: ON" if self._reasoning_enabled else "🧠 Reasoning: OFF" + self._reasoning_toggle = widgets.Button( + label=toggle_label, + on_click=lambda x: self._on_reasoning_toggled(), + halign="end", + hexpand=False, + ) + + # Header top row with title and toggle + header_top = widgets.Box( + spacing=8, + hexpand=True, + child=[header_title, self._reasoning_toggle], + ) + + header_box = widgets.Box( + vertical=True, + spacing=4, + child=[header_top, self._model_label], + ) + + # Message list + self._message_list = widgets.Box( + vertical=True, + spacing=8, + hexpand=True, + vexpand=True, + valign="start", + ) + + # Scrolled window for messages + self._scroller = widgets.Scroll( + hexpand=True, + vexpand=True, + min_content_height=300, + child=self._message_list, + ) + + # Input text view (multi-line) + self._text_view = Gtk.TextView() + self._text_buffer = self._text_view.get_buffer() + self._text_view.set_wrap_mode(Gtk.WrapMode.WORD_CHAR) + self._text_view.set_hexpand(True) + self._text_view.set_vexpand(False) + self._text_view.set_left_margin(8) + self._text_view.set_right_margin(8) + self._text_view.set_top_margin(8) + self._text_view.set_bottom_margin(8) + self._text_view.set_size_request(300, 60) # Set explicit width and height + + # Set placeholder text + placeholder = "Ask a question…" if self._ollama_client.is_available else "Ollama not running - start with: ollama serve" + self._placeholder_text = placeholder + self._is_placeholder_shown = False + self._updating_placeholder = False + + # Add key event controller for Enter/Shift+Enter handling + key_controller = Gtk.EventControllerKey() + key_controller.connect("key-pressed", self._on_key_pressed) + self._text_view.add_controller(key_controller) + + # Add focus event controllers for placeholder handling + focus_controller = Gtk.EventControllerFocus() + focus_controller.connect("enter", self._on_text_view_focus_in) + focus_controller.connect("leave", self._on_text_view_focus_out) + self._text_view.add_controller(focus_controller) + + # Connect to buffer changed signal to handle typing + self._text_buffer.connect("changed", self._on_buffer_changed) + + # Scrolled window for text view + self._text_scroller = Gtk.ScrolledWindow() + self._text_scroller.set_child(self._text_view) + self._text_scroller.set_min_content_height(60) + self._text_scroller.set_max_content_height(200) + self._text_scroller.set_hexpand(True) + self._text_scroller.set_vexpand(False) + self._text_scroller.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC) + self._text_scroller.set_has_frame(True) + self._text_scroller.set_size_request(300, 60) + + # Store reference for use in input_box + self._text_view_container = self._text_scroller + + # Send button + self._send_button = widgets.Button( + label="Send", + on_click=lambda x: self._on_submit(), + ) + + # Input box + input_box = widgets.Box( + spacing=8, + hexpand=True, + child=[self._text_view_container, self._send_button], + ) + + # Main container + super().__init__( + vertical=True, + spacing=12, + hexpand=True, + vexpand=True, + child=[header_box, self._scroller, input_box], + css_classes=["ai-sidebar-content"], + ) + + # Set margins + self.set_margin_top(16) + self.set_margin_bottom(16) + self.set_margin_start(16) + self.set_margin_end(16) + + # Auto-archive old messages on startup (Option 2) + self._auto_archive_old_messages() + + # Load initial messages + self._populate_initial_messages() + + # Initialize placeholder display + self._update_placeholder() + + # Disable input if Ollama unavailable at 
startup + if not self._ollama_client.is_available: + self._set_input_enabled(False) + + # Start monitoring Ollama availability + self._ollama_monitor.start() + + def _auto_archive_old_messages(self, keep_recent: int = 20): + """Auto-archive old messages on startup, keeping only recent ones. + + Args: + keep_recent: Number of recent messages to keep in active conversation + """ + # Only auto-archive if we have more than keep_recent messages + if len(self._conversation_manager.messages) <= keep_recent: + return + + # Get the full conversation state before trimming + full_state = self._conversation_manager._state + + # Trim the conversation and get removed messages + removed_messages = self._conversation_manager.trim_to_recent(keep_recent) + + if removed_messages: + # Create a state with only the old messages for archiving + from .conversation_manager import ConversationState + old_state = ConversationState( + conversation_id=full_state.conversation_id, + created_at=full_state.created_at, + updated_at=removed_messages[-1].get("timestamp", full_state.updated_at), + messages=removed_messages, + ) + + # Archive old messages without title (avoid blocking) + self._conversation_archive.archive_conversation(old_state) + + def _populate_initial_messages(self): + """Load conversation history""" + for message in self._conversation_manager.messages: + self._append_message(message["role"], message["content"], persist=False) + + if not self._conversation_manager.messages: + self._append_message( + "assistant", + "Welcome! Ask a question to start a conversation.", + persist=True, + ) + + def _append_message(self, role: str, content: str, *, persist: bool = True, reasoning: str | None = None): + """Add a message bubble to the chat + + Args: + role: Message role (user/assistant/system) + content: Message content + persist: Whether to save to conversation history + reasoning: Optional reasoning content to display separately + """ + label_prefix = "You" if role == "user" else "Assistant" + + # If reasoning content is provided and reasoning mode is enabled, format it specially + if reasoning and self._reasoning_controller.is_enabled(): + # Create a box to hold reasoning and content + message_box = widgets.Box( + vertical=True, + spacing=4, + halign="start", + ) + + # Add reasoning label with special styling + reasoning_label = widgets.Label( + label=f"πŸ’­ Reasoning: {reasoning}", + halign="start", + xalign=0.0, + wrap=True, + wrap_mode="word_char", + justify="left", + css_classes=["reasoning-content"], + ) + message_box.append(reasoning_label) + + # Add divider + divider = widgets.Box( + css_classes=["reasoning-divider"], + hexpand=True, + ) + divider.set_size_request(-1, 1) + message_box.append(divider) + + # Add main content + content_label = widgets.Label( + label=f"{label_prefix}: {content}", + halign="start", + xalign=0.0, + wrap=True, + wrap_mode="word_char", + justify="left", + ) + message_box.append(content_label) + + self._message_list.append(message_box) + else: + # Standard message without reasoning + message_label = widgets.Label( + label=f"{label_prefix}: {content}", + halign="start", + xalign=0.0, + wrap=True, + wrap_mode="word_char", + justify="left", + ) + self._message_list.append(message_label) + + self._scroll_to_bottom() + + if persist and self._conversation_manager: + self._conversation_manager.append_message(role, content) + + def _append_system_message(self, content: str): + """Add a system message (for command results) with distinct styling""" + message_label = widgets.Label( + 
label=f"System: {content}", + halign="start", + xalign=0.0, + wrap=True, + wrap_mode="word_char", + justify="left", + css_classes=["dim-label"], # Use dim-label for system messages + ) + + self._message_list.append(message_label) + self._scroll_to_bottom() + + def _clear_message_list(self): + """Safely clear all messages from the UI""" + # Collect all children first to avoid iterator issues + children = [] + child = self._message_list.get_first_child() + while child is not None: + children.append(child) + child = child.get_next_sibling() + + # Now remove them + for child in children: + self._message_list.remove(child) + + def _scroll_to_bottom(self): + """Scroll to the latest message""" + def _scroll(): + adjustment = self._scroller.get_vadjustment() + if adjustment: + adjustment.set_value(adjustment.get_upper() - adjustment.get_page_size()) + return False + + GLib.idle_add(_scroll) + + def _set_input_enabled(self, enabled: bool): + """Enable/disable input controls""" + self._text_view.set_sensitive(enabled) + self._send_button.set_sensitive(enabled) + + def _update_placeholder(self): + """Update placeholder text visibility based on buffer content""" + # Prevent recursion during placeholder updates + if self._updating_placeholder: + return False + + self._updating_placeholder = True + + try: + start_iter = self._text_buffer.get_start_iter() + end_iter = self._text_buffer.get_end_iter() + text = self._text_buffer.get_text(start_iter, end_iter, False) + + if not text: + # Buffer is empty, show placeholder + if not self._is_placeholder_shown: + # Create placeholder tag if needed + if not hasattr(self, '_placeholder_tag'): + self._placeholder_tag = self._text_buffer.create_tag( + "placeholder", + foreground="gray" + ) + + self._text_buffer.set_text(self._placeholder_text) + self._text_buffer.apply_tag( + self._placeholder_tag, + self._text_buffer.get_start_iter(), + self._text_buffer.get_end_iter() + ) + self._is_placeholder_shown = True + else: + # Buffer has content + if self._is_placeholder_shown and text == self._placeholder_text: + # User started typing, remove placeholder + self._text_buffer.set_text("") + self._is_placeholder_shown = False + finally: + self._updating_placeholder = False + + return False # For GLib.idle_add + + def _on_buffer_changed(self, buffer): + """Handle text buffer changes to manage placeholder""" + # Prevent recursion during placeholder updates + if self._updating_placeholder: + return + + start_iter = buffer.get_start_iter() + end_iter = buffer.get_end_iter() + text = buffer.get_text(start_iter, end_iter, False) + + # If placeholder is shown and user typed something, clear it + if self._is_placeholder_shown and text != self._placeholder_text: + self._updating_placeholder = True + # User started typing, clear everything and keep only what they typed + # Extract what they typed (everything after placeholder) + if text.startswith(self._placeholder_text): + new_text = text[len(self._placeholder_text):] + buffer.set_text(new_text) + self._is_placeholder_shown = False + self._updating_placeholder = False + + def _on_text_view_focus_in(self, controller): + """Handle focus entering the text view""" + # Clear placeholder when user focuses + if self._is_placeholder_shown: + self._updating_placeholder = True + self._text_buffer.set_text("") + self._is_placeholder_shown = False + self._updating_placeholder = False + + def _on_text_view_focus_out(self, controller): + """Handle focus leaving the text view""" + # Show placeholder if buffer is empty + start_iter = 
self._text_buffer.get_start_iter() + end_iter = self._text_buffer.get_end_iter() + text = self._text_buffer.get_text(start_iter, end_iter, False) + + if not text: + self._update_placeholder() + + def _on_key_pressed(self, controller, keyval, keycode, state): + """Handle key press events for Enter/Shift+Enter""" + # Check if Enter key was pressed + if keyval == Gdk.KEY_Return or keyval == Gdk.KEY_KP_Enter: + # Check if Shift is held + if state & Gdk.ModifierType.SHIFT_MASK: + # Shift+Enter: allow newline (default behavior) + return False + else: + # Enter alone: submit message + self._on_submit() + return True # Prevent default newline insertion + + return False + + def _on_submit(self): + """Handle message submission""" + # Get text from TextView buffer + start_iter = self._text_buffer.get_start_iter() + end_iter = self._text_buffer.get_end_iter() + text = self._text_buffer.get_text(start_iter, end_iter, False).strip() + + # Ignore if empty or just placeholder + if not text or text == self._placeholder_text: + return + + # Clear the text buffer + self._text_buffer.set_text("") + + # Defer placeholder update to avoid GTK warnings during user action + GLib.idle_add(self._update_placeholder) + + # Check if this is a command + if self._command_processor.is_command(text): + # Execute command + result = self._command_processor.execute(text) + + # Display command result as system message + self._append_system_message(result.message) + return + + # Check Ollama availability before processing regular messages + if not self._ollama_client.is_available: + self._append_message( + "assistant", + "Ollama is not running. Please start Ollama with: ollama serve", + persist=False, + ) + return + + self._append_message("user", text, persist=True) + self._request_response() + + def _request_response(self): + """Request AI response in background thread with streaming""" + # Double-check availability before making request + if not self._ollama_client.is_available: + self._append_message( + "assistant", + "Ollama is not running. Please start Ollama with: ollama serve", + persist=False, + ) + return + + model = self._current_model or self._ollama_client.default_model + if not model: + self._append_message( + "assistant", + "No Ollama models are available. 
Install a model with: ollama pull llama2", + persist=True, + ) + return + + + + history = self._conversation_manager.chat_messages + self._set_input_enabled(False) + + # Create message container for streaming + message_container = widgets.Box( + vertical=True, + spacing=8, + halign="start", + ) + + # Create thinking box (initially hidden) + thinking_box = widgets.Box( + vertical=True, + spacing=0, + css_classes=["thinking-box"], + visible=False, + ) + + # Create collapsible header button + thinking_header_box = widgets.Box( + spacing=8, + halign="start", + ) + + thinking_icon = widgets.Label( + label="β–Ό", + css_classes=["thinking-icon"], + ) + + thinking_header_label = widgets.Label( + label="πŸ’­ Thinking...", + halign="start", + css_classes=["thinking-header"], + ) + + thinking_header_box.append(thinking_icon) + thinking_header_box.append(thinking_header_label) + + # Make header clickable + thinking_header_button = widgets.Button( + child=thinking_header_box, + css_classes=["thinking-header-button"], + ) + + # Create revealer for collapsible content + thinking_content = widgets.Label( + label="", + halign="start", + xalign=0.0, + wrap=True, + wrap_mode="word_char", + justify="left", + css_classes=["thinking-content"], + ) + + thinking_revealer = widgets.Revealer( + child=thinking_content, + transition_type="slide_down", + transition_duration=200, + reveal_child=True, + ) + + # Connect toggle functionality + def toggle_thinking(): + is_revealed = thinking_revealer.reveal_child + thinking_revealer.reveal_child = not is_revealed + thinking_icon.label = "β–Ό" if not is_revealed else "β–²" + + thinking_header_button.on_click = lambda x: toggle_thinking() + + thinking_box.append(thinking_header_button) + thinking_box.append(thinking_revealer) + + # Create main response label + response_label = widgets.Label( + label="Assistant: ...", + halign="start", + xalign=0.0, + wrap=True, + wrap_mode="word_char", + justify="left", + ) + + message_container.append(thinking_box) + message_container.append(response_label) + + self._message_list.append(message_container) + self._scroll_to_bottom() + + # Create streaming handler with both widgets + streaming_handler = StreamingHandler( + response_label, + self._scroller, + thinking_widget=thinking_content, + thinking_box=thinking_box + ) + + def _worker(messages, handler): + try: + handler.start_stream() + + # Get model-specific options + options = self._reasoning_controller.get_model_options() + + # Stream response tokens + for chunk in self._ollama_client.stream_chat( + model=model, + messages=list(messages), + options=options + ): + # Check for errors + if chunk.get("error"): + content = chunk.get("content", "An error occurred during streaming") + GLib.idle_add(self._handle_stream_error, content, priority=GLib.PRIORITY_DEFAULT) + return + + # Extract tokens from chunk + message = chunk.get("message", {}) + content_token = message.get("content", "") + thinking_token = message.get("thinking", "") + + if content_token: + handler.append_token(content_token) + + if thinking_token: + handler.append_thinking_token(thinking_token) + + # Check if streaming is complete + if chunk.get("done", False): + break + + # Finalize stream and get complete content + thinking_content, main_content = handler.finish_stream() + + # Persist the complete message + GLib.idle_add( + self._handle_stream_complete, + thinking_content, + main_content, + priority=GLib.PRIORITY_DEFAULT + ) + + except Exception as exc: + error_msg = f"Streaming error: {exc}" + 
GLib.idle_add(self._handle_stream_error, error_msg, priority=GLib.PRIORITY_DEFAULT) + + thread = threading.Thread(target=_worker, args=(history, streaming_handler), daemon=True) + thread.start() + + def _handle_response(self, response): + """Handle AI response (legacy non-streaming method)""" + self._set_input_enabled(True) + + if not response: + self._append_message( + "assistant", + "The model returned an empty response.", + persist=True, + ) + return False + + role = response.get("role", "assistant") + content = response.get("content") or "" + if not content: + content = "[No content received from Ollama]" + + self._append_message(role, content, persist=True) + return False + + def _handle_stream_token(self, token: str) -> bool: + """Handle individual stream token with thread-safe UI update. + + This method is called via GLib.idle_add for thread-safe UI updates. + + Args: + token: The token string to process + + Returns: + False to indicate this is a one-time callback + """ + # Token handling is now managed by StreamingHandler + # This method exists for potential future direct token handling + return False + + def _handle_stream_complete(self, thinking_content: str, main_content: str) -> bool: + """Handle stream completion and persist message. + + Args: + thinking_content: The thinking/reasoning content + main_content: The main response content + + Returns: + False to indicate this is a one-time callback + """ + + + # The message is already displayed with thinking by the StreamingHandler + # We just need to persist it to conversation history + + # Combine thinking and content for persistence + full_content = main_content + if thinking_content: + full_content = f"{thinking_content}\n{main_content}" + + if full_content and self._conversation_manager: + self._conversation_manager.append_message("assistant", full_content) + + # Re-enable input + self._set_input_enabled(True) + return False + + def _handle_stream_error(self, error_msg: str) -> bool: + """Handle streaming errors. + + Args: + error_msg: The error message to display + + Returns: + False to indicate this is a one-time callback + """ + # Display error message (don't persist errors) + self._append_message("assistant", error_msg, persist=False) + + # Re-enable input + self._set_input_enabled(True) + return False + + def focus_input(self): + """Focus the input text view""" + self._text_view.grab_focus() + + def _on_ollama_availability_changed(self, is_available: bool): + """Handle Ollama availability state changes""" + if is_available: + # Ollama became available - use model from reasoning controller + self._current_model = self._reasoning_controller.get_model_name() + model_name = self._current_model or "No local model detected" + self._model_label.label = f"Model: {model_name}" + self._placeholder_text = "Ask a question…" + self._update_placeholder() + self._set_input_enabled(True) + + # Show notification + self._append_message( + "assistant", + "Ollama connection restored! 
You can now chat.", + persist=False, + ) + else: + # Ollama became unavailable + self._model_label.label = "Model: Ollama not running" + self._placeholder_text = "Ollama not running - start with: ollama serve" + self._update_placeholder() + self._set_input_enabled(False) + + def _on_reasoning_toggled(self): + """Handle reasoning mode toggle button state changes""" + # Toggle the reasoning mode + new_state = self._reasoning_controller.toggle() + self._reasoning_enabled = new_state + + # Switch to the appropriate model + new_model = self._reasoning_controller.get_model_name() + self._current_model = new_model + + + + # Update button label + toggle_label = "🧠 Reasoning: ON" if new_state else "🧠 Reasoning: OFF" + self._reasoning_toggle.label = toggle_label + + # Update model label in header + self._model_label.label = f"Model: {new_model}" + + # Show feedback message + status = "enabled" if new_state else "disabled" + model_name = "Qwen3-4B-Thinking" if new_state else "Qwen3-4B-Instruct" + self._append_system_message(f"Reasoning mode {status} - switched to {model_name}") + + + def _load_css(self): + """Load CSS styling for TextView""" + css_provider = Gtk.CssProvider() + css_path = os.path.join(os.path.dirname(__file__), "style.css") + + if os.path.exists(css_path): + css_provider.load_from_path(css_path) + Gtk.StyleContext.add_provider_for_display( + Gdk.Display.get_default(), + css_provider, + Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION + ) + + def _register_commands(self): + """Register all slash commands with the command processor""" + self._command_processor.register_command("/new", self._cmd_new_conversation) + self._command_processor.register_command("/clear", self._cmd_new_conversation) + self._command_processor.register_command("/models", self._cmd_list_models) + self._command_processor.register_command("/model", self._cmd_switch_model) + self._command_processor.register_command("/list", self._cmd_list_conversations) + self._command_processor.register_command("/resume", self._cmd_resume_conversation) + + def _generate_conversation_title(self, messages: list) -> str: + """Generate a concise title for a conversation using AI. + + Args: + messages: List of conversation messages + + Returns: + Generated title or empty string if generation fails + """ + if not messages or not self._ollama_client.is_available: + return "" + + # Extract first few user messages for context + user_messages = [msg for msg in messages if msg.get("role") == "user"] + if not user_messages: + return "" + + # Use first 2-3 user messages for context + context_messages = user_messages[:3] + context_text = " ".join([msg.get("content", "")[:100] for msg in context_messages]) + + # Create a prompt for title generation + title_prompt = [ + { + "role": "system", + "content": "You are a helpful assistant that generates concise, descriptive titles for conversations. Generate a title that is 3-6 words maximum. Respond with ONLY the title, no quotes or extra text." 
+ }, + { + "role": "user", + "content": f"Generate a short title (3-6 words) for a conversation that starts with: {context_text}" + } + ] + + try: + model = self._current_model or self._ollama_client.default_model + if not model: + return "" + + # Use non-streaming chat for title generation + response = self._ollama_client.chat(model=model, messages=title_prompt) + if response and response.get("message"): + title = response["message"].get("content", "").strip() + # Clean up the title (remove quotes, limit length) + title = title.strip('"\'').strip() + # Limit to 50 characters + if len(title) > 50: + title = title[:47] + "..." + return title + except Exception: + # If title generation fails, return empty string + pass + + return "" + + def _cmd_new_conversation(self, args: str) -> CommandResult: + """ + Start a new conversation, saving the current one. + + Args: + args: Command arguments (unused for /new and /clear) + + Returns: + CommandResult with success status and confirmation message + """ + # Archive current conversation if it has messages + archive_id = None + if self._conversation_manager.messages: + current_state = self._conversation_manager._state + + # Archive without title for now (avoid blocking main thread) + # Title generation will happen in background via auto-archive + archive_id = self._conversation_archive.archive_conversation(current_state) + + # Clear the conversation manager (Option 1: clear default.json) + self._conversation_manager.clear_messages() + + # Clear the message list UI safely + self._clear_message_list() + + # Add welcome message to new conversation + self._append_message( + "assistant", + "New conversation started. Previous conversation archived.", + persist=True, + ) + + if archive_id: + return CommandResult( + success=True, + message=f"Started new conversation. Previous conversation archived as '{archive_id}'" + ) + else: + return CommandResult( + success=True, + message="Started new conversation (no previous messages to archive)" + ) + + def _cmd_list_models(self, args: str) -> CommandResult: + """ + List available Ollama models. + + Args: + args: Command arguments (unused) + + Returns: + CommandResult with model list + """ + if not self._ollama_client.is_available: + return CommandResult( + success=False, + message="Ollama is not running. Start Ollama with: ollama serve" + ) + + models = self._ollama_client.list_models(force_refresh=True) + + if not models: + return CommandResult( + success=True, + message="No models available. Install a model with: ollama pull llama2" + ) + + # Format model list with current model highlighted + model_lines = [] + for model in models: + if model == self._current_model: + model_lines.append(f"β€’ {model} (current)") + else: + model_lines.append(f"β€’ {model}") + + message = "Available models:\n" + "\n".join(model_lines) + + return CommandResult( + success=True, + message=message, + data={"models": models} + ) + + def _cmd_switch_model(self, args: str) -> CommandResult: + """ + Switch to a different Ollama model. + + Args: + args: Model name to switch to + + Returns: + CommandResult with success status + """ + if not self._ollama_client.is_available: + return CommandResult( + success=False, + message="Ollama is not running. 
+                message="Ollama is not running. Start Ollama with: ollama serve"
+            )
+
+        model_name = args.strip()
+
+        if not model_name:
+            return CommandResult(
+                success=False,
+                message="Usage: /model <model_name>\nExample: /model llama2"
+            )
+
+        # Validate model exists
+        available_models = self._ollama_client.list_models(force_refresh=True)
+
+        if model_name not in available_models:
+            return CommandResult(
+                success=False,
+                message=f"Model '{model_name}' not found. Use /models to see available models."
+            )
+
+        # Switch model
+        old_model = self._current_model
+        self._current_model = model_name
+
+        # Update model label in header
+        self._model_label.label = f"Model: {model_name}"
+
+        return CommandResult(
+            success=True,
+            message=f"Switched from '{old_model}' to '{model_name}'"
+        )
+
+    def _cmd_list_conversations(self, args: str) -> CommandResult:
+        """
+        List all archived conversations with metadata.
+
+        Args:
+            args: Command arguments (unused)
+
+        Returns:
+            CommandResult with formatted conversation list
+        """
+        conversations = self._conversation_archive.list_conversations()
+
+        if not conversations:
+            return CommandResult(
+                success=True,
+                message="No archived conversations found. Use /new to archive the current conversation."
+            )
+
+        # Format conversation list
+        lines = ["Archived conversations:"]
+        for conv in conversations:
+            # Format timestamp for display
+            try:
+                from datetime import datetime
+                dt = datetime.fromisoformat(conv.updated_at.replace('Z', '+00:00'))
+                time_str = dt.strftime("%Y-%m-%d %H:%M")
+            except (ValueError, AttributeError):
+                time_str = conv.updated_at[:16]  # Fallback to raw string
+
+            # Display title prominently, with archive_id as reference
+            lines.append(f"β€’ {conv.title}")
+            lines.append(f"  ID: {conv.archive_id}")
+            lines.append(f"  Updated: {time_str} | Messages: {conv.message_count}")
+
+        lines.append("\nUse /resume <id> to load a conversation")
+
+        return CommandResult(
+            success=True,
+            message="\n".join(lines),
+            data={"conversations": [c.archive_id for c in conversations]}
+        )
+
+    def _cmd_resume_conversation(self, args: str) -> CommandResult:
+        """
+        Resume an archived conversation by loading it.
+
+        Args:
+            args: Archive ID to resume
+
+        Returns:
+            CommandResult with success status
+        """
+        archive_id = args.strip()
+
+        if not archive_id:
+            return CommandResult(
+                success=False,
+                message="Usage: /resume <archive_id>\nUse /list to see available conversations"
+            )
+
+        # Load the conversation
+        conversation_state = self._conversation_archive.load_conversation(archive_id)
+
+        if conversation_state is None:
+            return CommandResult(
+                success=False,
+                message=f"Conversation '{archive_id}' not found or could not be loaded.\nUse /list to see available conversations."
+            )
+
+        # Archive current conversation before switching
+        if self._conversation_manager.messages:
+            current_state = self._conversation_manager._state
+            # Archive without title (avoid blocking)
+            self._conversation_archive.archive_conversation(current_state)
+
+        # Clear the message list UI safely
+        self._clear_message_list()
+
+        # Create new conversation manager with loaded state
+        self._conversation_manager = ConversationManager()
+        self._conversation_manager.replace_messages(conversation_state.messages)
+
+        # Repopulate message list from loaded conversation
+        for message in conversation_state.messages:
+            self._append_message(message["role"], message["content"], persist=False)
+
+        return CommandResult(
+            success=True,
+            message=f"Resumed conversation '{archive_id}' with {len(conversation_state.messages)} messages"
+        )
diff --git a/command_processor.py b/command_processor.py
new file mode 100644
index 0000000..be7fa46
--- /dev/null
+++ b/command_processor.py
@@ -0,0 +1,87 @@
+"""Command processing system for AI Sidebar slash commands."""
+
+from dataclasses import dataclass
+from typing import Callable
+
+
+@dataclass
+class CommandResult:
+    """Result of command execution."""
+    success: bool
+    message: str
+    data: dict | None = None
+
+
+class CommandProcessor:
+    """Parses and executes slash commands."""
+
+    def __init__(self):
+        """Initialize command processor with command registry."""
+        self._handlers: dict[str, Callable] = {}
+
+    def register_command(self, command: str, handler: Callable) -> None:
+        """
+        Register a command handler.
+
+        Args:
+            command: Command string (e.g., "/new", "/model")
+            handler: Callable that takes command arguments and returns CommandResult
+        """
+        self._handlers[command.lower()] = handler
+
+    def is_command(self, text: str) -> bool:
+        """
+        Check if text starts with a command.
+
+        Args:
+            text: User input text
+
+        Returns:
+            True if text starts with a registered command
+        """
+        if not text or not text.startswith("/"):
+            return False
+
+        # Extract command (first word)
+        parts = text.strip().split(maxsplit=1)
+        command = parts[0].lower()
+
+        return command in self._handlers
+
+    def execute(self, text: str) -> CommandResult:
+        """
+        Parse and execute command.
+
+        Args:
+            text: User input text starting with command
+
+        Returns:
+            CommandResult with execution status and message
+        """
+        if not text or not text.startswith("/"):
+            return CommandResult(
+                success=False,
+                message="Invalid command format. Commands must start with '/'."
+            )
+
+        # Parse command and arguments
+        parts = text.strip().split(maxsplit=1)
+        command = parts[0].lower()
+        args = parts[1] if len(parts) > 1 else ""
+
+        # Check if command is registered
+        if command not in self._handlers:
+            return CommandResult(
+                success=False,
+                message=f"Unknown command: {command}. Available commands: {', '.join(sorted(self._handlers))}"
+            )
+
+        # Execute command handler
+        try:
+            handler = self._handlers[command]
+            return handler(args)
+        except Exception as e:
+            return CommandResult(
+                success=False,
+                message=f"Error executing command: {str(e)}"
+            )
diff --git a/config.py b/config.py
deleted file mode 100644
index dd14e8f..0000000
--- a/config.py
+++ /dev/null
@@ -1,8 +0,0 @@
-"""Configuration defaults and helpers for the AI sidebar."""
-
-
-class Config:
-    """Placeholder configuration container."""
-
-    def __init__(self) -> None:
-        raise NotImplementedError("Configuration logic not implemented yet.")
diff --git a/conversation_archive.py b/conversation_archive.py
new file mode 100644
index 0000000..5c0a2e5
--- /dev/null
+++ b/conversation_archive.py
@@ -0,0 +1,181 @@
+"""Conversation archive management for multi-conversation persistence."""
+
+from __future__ import annotations
+
+import hashlib
+import json
+from dataclasses import dataclass
+from datetime import datetime, timezone
+from pathlib import Path
+from typing import List
+
+from .conversation_manager import ConversationState, DEFAULT_CONVERSATION_ID
+
+
+@dataclass
+class ConversationMetadata:
+    """Metadata for conversation list display."""
+
+    archive_id: str
+    title: str  # AI-generated or user-provided title
+    created_at: str
+    updated_at: str
+    message_count: int
+    preview: str  # First 50 chars of first user message
+
+
+class ConversationArchive:
+    """Manages multiple conversation files with archiving capabilities."""
+
+    def __init__(self, storage_dir: str | Path | None = None) -> None:
+        if storage_dir is None:
+            module_root = Path(__file__).resolve().parent
+            storage_dir = module_root / "data" / "conversations"
+
+        self._storage_dir = Path(storage_dir)
+        self._storage_dir.mkdir(parents=True, exist_ok=True)
+
+    def generate_archive_id(self) -> str:
+        """Create unique archive ID: archive_YYYYMMDD_HHMMSS_<hash>."""
+        now = datetime.now(timezone.utc)
+        timestamp_part = now.strftime("%Y%m%d_%H%M%S")
+
+        # Generate short hash from timestamp + microseconds for uniqueness
+        hash_input = f"{now.isoformat()}{now.microsecond}".encode("utf-8")
+        hash_digest = hashlib.sha256(hash_input).hexdigest()[:8]
+
+        return f"archive_{timestamp_part}_{hash_digest}"
+
+    def archive_conversation(
+        self,
+        conversation_state: ConversationState,
+        archive_id: str | None = None,
+        title: str | None = None
+    ) -> str:
+        """Save conversation with timestamp-based archive ID.
+
+        Args:
+            conversation_state: The conversation to archive
+            archive_id: Optional custom archive ID, generates one if not provided
+            title: Optional title for the conversation
+
+        Returns:
+            The archive ID used for the saved conversation
+        """
+        if archive_id is None:
+            archive_id = self.generate_archive_id()
+
+        archive_path = self._storage_dir / f"{archive_id}.json"
+
+        payload = {
+            "id": archive_id,
+            "title": title or "",
+            "created_at": conversation_state.created_at,
+            "updated_at": conversation_state.updated_at,
+            "messages": conversation_state.messages,
+        }
+
+        with archive_path.open("w", encoding="utf-8") as fh:
+            json.dump(payload, fh, indent=2, ensure_ascii=False)
+
+        return archive_id
+
+    def list_conversations(self) -> List[ConversationMetadata]:
+        """Return metadata for all saved conversations.
+
+        Scans the storage directory for conversation files and extracts metadata.
+        Excludes the default.json active conversation file.
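+        Corrupted or unreadable archive files are skipped instead of raising errors.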
+
+        Returns:
+            List of ConversationMetadata sorted by updated_at (newest first)
+        """
+        conversations: List[ConversationMetadata] = []
+
+        for json_file in self._storage_dir.glob("*.json"):
+            # Skip the default active conversation
+            if json_file.stem == DEFAULT_CONVERSATION_ID:
+                continue
+
+            try:
+                with json_file.open("r", encoding="utf-8") as fh:
+                    payload = json.load(fh)
+
+                archive_id = payload.get("id", json_file.stem)
+                title = payload.get("title", "")
+                created_at = payload.get("created_at", "")
+                updated_at = payload.get("updated_at", created_at)
+                messages = payload.get("messages", [])
+                message_count = len(messages)
+
+                # Extract preview from first user message
+                preview = ""
+                for msg in messages:
+                    if msg.get("role") == "user":
+                        content = msg.get("content", "")
+                        preview = content[:50]
+                        if len(content) > 50:
+                            preview += "..."
+                        break
+
+                # Use title if available, otherwise use archive_id
+                display_title = title if title else archive_id
+
+                metadata = ConversationMetadata(
+                    archive_id=archive_id,
+                    title=display_title,
+                    created_at=created_at,
+                    updated_at=updated_at,
+                    message_count=message_count,
+                    preview=preview,
+                )
+                conversations.append(metadata)
+
+            except (json.JSONDecodeError, OSError, KeyError):
+                # Skip corrupted or inaccessible files
+                continue
+
+        # Sort by updated_at, newest first
+        conversations.sort(key=lambda c: c.updated_at, reverse=True)
+
+        return conversations
+
+    def load_conversation(self, archive_id: str) -> ConversationState | None:
+        """Load archived conversation by ID.
+
+        Args:
+            archive_id: The ID of the conversation to load
+
+        Returns:
+            ConversationState if found and valid, None otherwise
+        """
+        archive_path = self._storage_dir / f"{archive_id}.json"
+
+        if not archive_path.exists():
+            return None
+
+        try:
+            with archive_path.open("r", encoding="utf-8") as fh:
+                payload = json.load(fh)
+
+            conversation_id = payload.get("id", archive_id)
+            created_at = payload.get("created_at", datetime.now(timezone.utc).isoformat())
+            updated_at = payload.get("updated_at", created_at)
+            messages = payload.get("messages", [])
+
+            # Validate messages structure
+            validated_messages = []
+            for msg in messages:
+                if isinstance(msg, dict) and "role" in msg and "content" in msg:
+                    validated_messages.append(msg)
+
+            return ConversationState(
+                conversation_id=conversation_id,
+                created_at=created_at,
+                updated_at=updated_at,
+                messages=validated_messages,
+            )
+
+        except (json.JSONDecodeError, OSError, KeyError):
+            # Handle JSON parsing errors and missing files gracefully
+            return None
diff --git a/conversation_manager.py b/conversation_manager.py
index 7e0b365..9db3e07 100644
--- a/conversation_manager.py
+++ b/conversation_manager.py
@@ -102,6 +102,42 @@ class ConversationManager:
         self._state.updated_at = now
         self._write_state()
 
+    def clear_messages(self) -> None:
+        """Clear all messages and reset the conversation state."""
+        timestamp = datetime.now(timezone.utc).isoformat()
+        self._state = ConversationState(
+            conversation_id=self._conversation_id,
+            created_at=timestamp,
+            updated_at=timestamp,
+            messages=[],
+        )
+        self._write_state()
+
+    def trim_to_recent(self, keep_count: int = 20) -> List[Dict[str, str]]:
+        """Trim conversation to keep only recent messages, return removed messages.
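+
+        The removed (older) messages are returned so the caller can, for
+        example, archive or summarize them.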
+ + Args: + keep_count: Number of recent messages to keep + + Returns: + List of messages that were removed (older messages) + """ + if len(self._state.messages) <= keep_count: + return [] + + # Split messages into old and recent + removed_messages = self._state.messages[:-keep_count] + self._state.messages = self._state.messages[-keep_count:] + + # Update state and persist + self._state.updated_at = datetime.now(timezone.utc).isoformat() + self._write_state() + + return removed_messages + # ------------------------------------------------------------------ persistence def _load_state(self) -> ConversationState: """Load the transcript from disk or create a fresh default.""" diff --git a/data/conversations/.gitkeep b/data/conversations/.gitkeep deleted file mode 100644 index e69de29..0000000 diff --git a/aisidebar/data/conversations/default.json b/data/conversations/archive_20251026_063735_0790d478.json similarity index 64% rename from aisidebar/data/conversations/default.json rename to data/conversations/archive_20251026_063735_0790d478.json index 5e80c71..c0333ab 100644 --- a/aisidebar/data/conversations/default.json +++ b/data/conversations/archive_20251026_063735_0790d478.json @@ -1,7 +1,7 @@ { - "id": "default", + "id": "archive_20251026_063735_0790d478", "created_at": "2025-10-25T18:40:57.917675+00:00", - "updated_at": "2025-10-25T19:31:22.276254+00:00", + "updated_at": "2025-10-26T06:37:29.608604+00:00", "messages": [ { "role": "assistant", @@ -47,6 +47,41 @@ "role": "assistant", "content": "\nOkay, the user asked, \"What can I do to improve my programming skills?\" Let me think about how to approach this. First, I need to provide a comprehensive answer that covers various aspects of skill development. The user might be a beginner or intermediate programmer looking for ways to grow.\n\nI should start by breaking down the answer into clear, actionable steps. Maybe categorize them into learning fundamentals, practice, projects, etc. It's important to mention both theoretical and practical aspects. Also, the user might be interested in resources, so including some recommendations would be helpful.\n\nWait, the user might not have specified their current level. Should I address different levels? Maybe mention that the advice applies to all levels but emphasize the importance of fundamentals for beginners. Also, include tips on problem-solving, understanding algorithms, and staying updated with technologies.\n\nOh, and collaboration is key. Suggesting contributing to open source or working on team projects could be beneficial. Maybe mention debugging skills and code reviews as well. Oh, and the importance of documentation and writing clean code. \n\nI should also think about common pitfalls. For example, learning without practicing, or not understanding the basics. Emphasize the need for consistent practice. Maybe include some resources like books, online courses, or communities. \n\nWait, the user might be looking for a structured approach. So, listing steps in order of importance could help. Start with fundamentals, then practice, then projects, then advanced topics. Also, mention the importance of patience and persistence. \n\nI should make sure the answer is encouraging and not overwhelming. Break it into sections with headings for clarity. Use examples like coding challenges, personal projects, etc. Maybe include tips on using version control like Git. \n\nAlso, the user might be interested in how to stay motivated. 
Suggesting setting goals, tracking progress, and celebrating small wins could be useful. Oh, and the importance of understanding the problem before jumping into coding. \n\nI need to avoid being too technical or too vague. Balance between practical advice and theoretical knowledge. Maybe mention specific tools or platforms like LeetCode, HackerRank, or GitHub. \n\nWait, the user might not know where to start. Including resources like free courses or communities like Stack Overflow or Reddit could be helpful. Also, mentioning the importance of reading others' code and contributing to open source. \n\nI should also touch on soft skills like communication and teamwork, especially if they're aiming for a career in programming. Maybe mention the importance of understanding the bigger picture, like system design or architecture. \n\nLet me structure the answer with clear sections: Learn Fundamentals, Practice Regularly, Work on Projects, Learn Advanced Topics, Collaborate, Stay Updated, and More. Each section with a few bullet points. That way, it's easy to follow and covers all bases. \n\nI need to make sure the language is friendly and encouraging. Avoid jargon where possible, but still provide enough depth. Also, remind them that improvement takes time and consistent effort. \n\nOkay, that should cover the main points. Let me put it all together in a clear, organized manner.\n\n\nImproving your programming skills is a continuous journey, but with the right strategies, you can grow significantly. Here’s a structured approach to help you progress:\n\n---\n\n### **1. Master the Fundamentals**\n- **Learn the basics**: Ensure you understand core concepts like data types, control structures (loops, conditionals), functions, and data structures (arrays, lists, dictionaries).\n- **Study algorithms and problem-solving**: Practice solving problems using logic and algorithms (e.g., sorting, searching, recursion). Platforms like **LeetCode**, **HackerRank**, or **Codewars** are great for this.\n- **Understand the language’s syntax and idioms**: For your chosen language (e.g., Python, JavaScript, Java), learn its best practices and common patterns.\n\n---\n\n### **2. Practice Regularly**\n- **Write code daily**: Even 15–30 minutes a day can make a difference. Use coding challenges or build small projects.\n- **Debug and refactor**: Learn to identify and fix bugs. Refactor code to improve readability and efficiency.\n- **Use coding platforms**: Websites like **Exercism**, **CodinGame**, or **FreeCodeCamp** offer interactive exercises and projects.\n\n---\n\n### **3. Build Projects**\n- **Start with small projects**: Create tools, games, or utilities (e.g., a to-do list app, calculator, or weather app).\n- **Work on real-world problems**: Solve issues you encounter in daily life (e.g., automate file sorting, create a budget tracker).\n- **Contribute to open source**: Find beginner-friendly projects on **GitHub** and contribute fixes or features. This helps you learn collaboration and real-world coding practices.\n\n---\n\n### **4. Learn Advanced Topics**\n- **Explore frameworks and libraries**: Learn tools relevant to your goals (e.g., React for front-end, Django for back-end, or TensorFlow for AI).\n- **Study software design patterns**: Understand patterns like MVC, singleton, or observer to write scalable code.\n- **Learn about system architecture**: For larger projects, study how to design systems, databases, and APIs.\n\n---\n\n### **5. 
Collaborate and Learn from Others**\n- **Join coding communities**: Participate in forums like **Stack Overflow**, **Reddit (r/learnprogramming)**, or Discord groups.\n- **Code reviews**: Ask others to review your code (or review theirs) to learn best practices and spot improvements.\n- **Pair programming**: Work with others to solve problems collaboratively, which can expose you to new ideas and techniques.\n\n---\n\n### **6. Stay Updated and Curious**\n- **Follow industry trends**: Read blogs, newsletters, or podcasts like **The Changelog**, **Software Engineering Daily**, or **Dev.to**.\n- **Experiment with new technologies**: Try tools, languages, or frameworks that interest you (e.g., AI, blockchain, or web3).\n- **Attend workshops or courses**: Platforms like **Coursera**, **Udemy**, or **Pluralsight** offer structured learning paths.\n\n---\n\n### **7. Document and Reflect**\n- **Write explanations**: Teach others by summarizing what you’ve learned (e.g., write blog posts or explain concepts to a friend).\n- **Track progress**: Use tools like **GitHub**, **Notion**, or a journal to document your projects and growth.\n- **Review mistakes**: Analyze errors to avoid repeating them and improve your understanding.\n\n---\n\n### **8. Focus on Soft Skills**\n- **Communication**: Practice explaining your code and ideas clearly, especially when working in teams.\n- **Time management**: Set goals (e.g., \"Learn Python in 3 months\") and break tasks into manageable steps.\n- **Resilience**: Embrace failure as part of the learning process. Debugging and fixing bugs are essential skills.\n\n---\n\n### **Resources to Explore**\n- **Books**: *Clean Code* by Robert C. Martin, *The Pragmatic Programmer*, *Grokking Algorithms*.\n- **Courses**: FreeCodeCamp, Coursera, edX, or YouTube tutorials (e.g., Traversy Media, The Net Ninja).\n- **Tools**: Git for version control, VS Code or PyCharm for IDEs, and Docker for containerization.\n\n---\n\n### **Final Tips**\n- **Be patient**: Mastery takes time. Celebrate small wins (e.g., fixing a bug or completing a project).\n- **Stay consistent**: Even 10–15 minutes of daily practice can lead to significant growth over time.\n- **Ask questions**: Don’t hesitate to seek help when stuckβ€”others are often happy to share knowledge.\n\nBy combining theory, practice, and curiosity, you’ll steadily improve your skills and become a more confident, versatile programmer. πŸš€", "timestamp": "2025-10-25T19:31:22.276254+00:00" + }, + { + "role": "user", + "content": "What is the meaning of life?", + "timestamp": "2025-10-26T06:22:32.837799+00:00" + }, + { + "role": "assistant", + "content": "\nOkay, the user asked, \"What is the meaning of life?\" That's a deep and philosophical question. I need to approach this carefully. First, I should acknowledge that there's no single answer. Different people and cultures have various perspectives.\n\nI should mention major philosophical viewpoints. Existentialism comes to mind, like Sartre and Camus, who say life has no inherent meaning, but we create our own. Then there's religious perspectives, such as the Abrahamic faiths where the purpose is to follow divine will. Eastern philosophies like Buddhism and Hinduism might focus on enlightenment or karma. \n\nAlso, scientific views could be included, like the idea that life's purpose is to survive and reproduce, but that's more biological. 
I should make sure to present these ideas without bias, as the user might be looking for a comprehensive overview.\n\nI need to keep the tone respectful and open-ended, encouraging the user to reflect on their own beliefs. Maybe end with a question to prompt further thought. Let me structure this clearly, separating the different perspectives and keeping it concise.\n\n\nThe question of the meaning of life is one of humanity’s oldest and most profound mysteries. There is no single answer, as it depends on personal beliefs, cultural context, and philosophical perspectives. Here are some common viewpoints:\n\n---\n\n### **1. Philosophical Perspectives**\n- **Existentialism**: Suggests life has no inherent meaning, but individuals create their own purpose through choices, actions, and creativity (e.g., Jean-Paul Sartre, Albert Camus). \n- **Nihilism**: Posits that life lacks intrinsic meaning, purpose, or value. \n- **Stoicism**: Emphasizes living in harmony with nature, cultivating virtue, and accepting what cannot be controlled. \n- **Utilitarianism**: Seeks to maximize happiness and minimize suffering for the greatest number. \n\n---\n\n### **2. Religious and Spiritual Views**\n- **Abrahamic Faiths (Christianity, Islam, Judaism)**: Often emphasize serving a higher power, following divine commandments, and seeking salvation or union with the divine. \n- **Buddhism**: Focuses on overcoming suffering (dukkha) through enlightenment (nirvana) and compassion. \n- **Hinduism**: Views life as part of a cycle of rebirth (samsara), with the goal of achieving moksha (liberation from the cycle). \n- **Pantheism/Animism**: Sees the divine in nature and life itself, often emphasizing harmony with the universe. \n\n---\n\n### **3. Scientific and Secular Views**\n- **Biological Perspective**: Life’s purpose could be seen as survival, reproduction, and the continuation of the species. \n- **Cosmic Perspective**: Some argue life is a cosmic accident, and meaning is self-created through curiosity, exploration, and connection with others. \n- **Humanism**: Emphasizes human potential, ethics, and the pursuit of knowledge, happiness, and social progress. \n\n---\n\n### **4. Personal and Subjective Views**\n- For many, meaning arises from relationships, creativity, personal growth, or contributing to others. \n- Others find purpose in art, science, activism, or spiritual practices. \n\n---\n\n### **A Thoughtful Approach**\nUltimately, the meaning of life is a deeply personal question. It may involve: \n- **Exploring your values** and what brings you fulfillment. \n- **Engaging with art, nature, or philosophy** to reflect on larger questions. \n- **Embracing uncertainty** and finding joy in the journey itself. \n\nAs the poet Rumi wrote: *\"You are not a drop in the ocean. You are the entire ocean in a drop.\"* Perhaps the answer lies in discovering your own unique role in the universe. 🌌 \n\nWhat do you think? How do you approach this question? 😊", + "timestamp": "2025-10-26T06:22:51.836438+00:00" + }, + { + "role": "assistant", + "content": "New conversation started. Previous conversation saved.", + "timestamp": "2025-10-26T06:31:20.958140+00:00" + }, + { + "role": "assistant", + "content": "New conversation started. Previous conversation saved.", + "timestamp": "2025-10-26T06:33:49.654297+00:00" + }, + { + "role": "assistant", + "content": "New conversation started. Previous conversation saved.", + "timestamp": "2025-10-26T06:34:02.693113+00:00" + }, + { + "role": "user", + "content": "Hello there! 
What can you help me with?", + "timestamp": "2025-10-26T06:37:18.116891+00:00" + }, + { + "role": "assistant", + "content": "\nOkay, the user just said, \"Hello there! What can you help me with?\" I need to respond appropriately. Let me think about the context.\n\nFirst, the user has been asking questions about programming, the meaning of life, and other topics. They might be looking for assistance in various areas. Since they mentioned \"help me with,\" I should list the main areas I can assist with.\n\nI should cover general knowledge, programming, writing, and maybe other skills. But I need to keep it concise and friendly. Also, the user might be a student, a professional, or just curious. Offering a range of options makes sense.\n\nI should mention specific examples for each category to give them a clear idea. For instance, under programming, they can ask about coding problems or language syntax. For writing, they can get help with essays or stories. \n\nI should also invite them to ask any questions they have, showing openness. Maybe add an emoji to keep the tone friendly. Let me structure it step by step to ensure clarity and helpfulness.\n\n\nHello! 😊 I can help with a wide range of topics, including:\n\n### 🧠 **General Knowledge** \n- Answering questions about science, history, culture, or pop culture. \n- Explaining complex concepts in simple terms. \n\n### πŸ’» **Programming & Tech** \n- Debugging code, understanding syntax, or learning new languages (Python, JavaScript, etc.). \n- Explaining algorithms, software design, or tech trends. \n\n### ✍️ **Writing & Creativity** \n- Helping draft essays, stories, emails, or scripts. \n- Offering feedback on ideas or creative projects. \n\n### πŸ“š **Learning & Study** \n- Summarizing books, articles, or academic papers. \n- Creating study guides or practice problems. \n\n### πŸ€” **Life & Philosophy** \n- Discussing big questions (like the meaning of life!). \n- Offering advice on personal growth, goals, or decisions. \n\nFeel free to ask anythingβ€”no question is too small or silly! What would you like to explore? 🌟", + "timestamp": "2025-10-26T06:37:29.608604+00:00" } ] } \ No newline at end of file diff --git a/data/conversations/archive_20251026_064314_9ec25159.json b/data/conversations/archive_20251026_064314_9ec25159.json new file mode 100644 index 0000000..9ce5d57 --- /dev/null +++ b/data/conversations/archive_20251026_064314_9ec25159.json @@ -0,0 +1,93 @@ +{ + "id": "archive_20251026_064314_9ec25159", + "title": "", + "created_at": "2025-10-25T18:40:57.917675+00:00", + "updated_at": "2025-10-26T06:37:35.693027+00:00", + "messages": [ + { + "role": "assistant", + "content": "Welcome! Ask a question to start a conversation.", + "timestamp": "2025-10-25T18:40:57.919077+00:00" + }, + { + "role": "user", + "content": "Hello!", + "timestamp": "2025-10-25T19:21:06.686290+00:00" + }, + { + "role": "assistant", + "content": "No Ollama models are available. Install a model to continue.", + "timestamp": "2025-10-25T19:21:06.690475+00:00" + }, + { + "role": "user", + "content": "Hello", + "timestamp": "2025-10-25T19:26:08.659211+00:00" + }, + { + "role": "assistant", + "content": "\nOkay, the user said \"Hello\" again. The previous response mentioned that no Ollama models are available and suggested installing one. Maybe the user isn't familiar with Ollama or the setup process. I should explain what Ollama is and how to install a model. Let me check the steps for installing a model with Ollama. 
First, they need to download the model files, then use the Ollama CLI to load them. I should provide clear instructions, maybe mention popular models like Llama or Mistral. Also, offer further assistance if they need help with the installation. Keep the tone friendly and helpful.\n\n\nHello! It looks like you're trying to use Ollama, but no models are installed yet. To get started, you'll need to download and install a model using the Ollama CLI. Here's how:\n\n1. **Install Ollama**: If you haven't already, download it from [https://ollama.com](https://ollama.com) and follow the installation instructions for your OS.\n2. **Pull a Model**: Use the command `ollama pull ` (e.g., `ollama pull llama3` or `ollama pull mistral`).\n3. **Run the Model**: Once installed, use `ollama run ` to start a chat session.\n\nLet me know if you need help with installation or model selection! 😊", + "timestamp": "2025-10-25T19:26:15.674075+00:00" + }, + { + "role": "user", + "content": "What is a \"Baumstriezel\"?", + "timestamp": "2025-10-25T19:27:39.745357+00:00" + }, + { + "role": "assistant", + "content": "\nOkay, the user asked, \"What is a 'Baumstriezel'?\" I need to figure out what that is. First, I'll check if I know anything about it. I remember that \"Baumstriezel\" sounds German. Maybe it's a type of pastry or a holiday item? Let me think.\n\nWait, \"Baum\" means tree in German, and \"Striezel\" could be related to something like a roll or a cake. Maybe it's a tree-shaped pastry? I think there's a German dessert called \"Stollen,\" which is a fruit cake, but maybe \"Baumstriezel\" is a variation. Alternatively, could it be a Christmas decoration? Like a tree-shaped ornament?\n\nI should verify. Let me recall. In some German-speaking regions, there are traditional sweets. \"Striezel\" might refer to a type of roll or cake. Maybe \"Baumstriezel\" is a tree-shaped version of that. Alternatively, could it be a type of Christmas tree made from candy or something else?\n\nWait, I think there's a German confection called \"Baumstriezel\" which is a tree-shaped sweet. It's often made with marzipan and chocolate, decorated with nuts and fruits. It's a traditional Christmas treat. Let me confirm that. Yes, I believe that's right. So the answer would be that it's a tree-shaped sweet, typically made with marzipan and chocolate, popular during Christmas. It's similar to a Stollen but in the shape of a tree. I should explain that and maybe mention it's a festive item, often found in Germany and other German-speaking countries during the holiday season.\n\n\nA **Baumstriezel** (sometimes spelled **Baumstriezel** or **Baumstriezel**) is a traditional German Christmas sweet shaped like a tree. It is typically made with **marzipan** and **chocolate**, and is decorated with **nuts, fruits, and candied peel** to resemble a tree. The name translates to \"tree roll\" or \"tree cake\" in English. \n\nThis festive treat is often enjoyed during the **Christmas season** in Germany and other German-speaking regions. It’s a sweet, chewy dessert that symbolizes the holiday spirit, often displayed as a decorative item or eaten as a treat. \n\nIf you’re curious about its taste or how it’s made, I’d be happy to share more details! 
πŸŽ„βœ¨", + "timestamp": "2025-10-25T19:27:49.659293+00:00" + }, + { + "role": "user", + "content": "What can I do do improve my programming skills?", + "timestamp": "2025-10-25T19:30:51.726827+00:00" + }, + { + "role": "assistant", + "content": "\nOkay, the user asked, \"What can I do to improve my programming skills?\" Let me think about how to approach this. First, I need to provide a comprehensive answer that covers various aspects of skill development. The user might be a beginner or intermediate programmer looking for ways to grow.\n\nI should start by breaking down the answer into clear, actionable steps. Maybe categorize them into learning fundamentals, practice, projects, etc. It's important to mention both theoretical and practical aspects. Also, the user might be interested in resources, so including some recommendations would be helpful.\n\nWait, the user might not have specified their current level. Should I address different levels? Maybe mention that the advice applies to all levels but emphasize the importance of fundamentals for beginners. Also, include tips on problem-solving, understanding algorithms, and staying updated with technologies.\n\nOh, and collaboration is key. Suggesting contributing to open source or working on team projects could be beneficial. Maybe mention debugging skills and code reviews as well. Oh, and the importance of documentation and writing clean code. \n\nI should also think about common pitfalls. For example, learning without practicing, or not understanding the basics. Emphasize the need for consistent practice. Maybe include some resources like books, online courses, or communities. \n\nWait, the user might be looking for a structured approach. So, listing steps in order of importance could help. Start with fundamentals, then practice, then projects, then advanced topics. Also, mention the importance of patience and persistence. \n\nI should make sure the answer is encouraging and not overwhelming. Break it into sections with headings for clarity. Use examples like coding challenges, personal projects, etc. Maybe include tips on using version control like Git. \n\nAlso, the user might be interested in how to stay motivated. Suggesting setting goals, tracking progress, and celebrating small wins could be useful. Oh, and the importance of understanding the problem before jumping into coding. \n\nI need to avoid being too technical or too vague. Balance between practical advice and theoretical knowledge. Maybe mention specific tools or platforms like LeetCode, HackerRank, or GitHub. \n\nWait, the user might not know where to start. Including resources like free courses or communities like Stack Overflow or Reddit could be helpful. Also, mentioning the importance of reading others' code and contributing to open source. \n\nI should also touch on soft skills like communication and teamwork, especially if they're aiming for a career in programming. Maybe mention the importance of understanding the bigger picture, like system design or architecture. \n\nLet me structure the answer with clear sections: Learn Fundamentals, Practice Regularly, Work on Projects, Learn Advanced Topics, Collaborate, Stay Updated, and More. Each section with a few bullet points. That way, it's easy to follow and covers all bases. \n\nI need to make sure the language is friendly and encouraging. Avoid jargon where possible, but still provide enough depth. Also, remind them that improvement takes time and consistent effort. \n\nOkay, that should cover the main points. 
Let me put it all together in a clear, organized manner.\n\n\nImproving your programming skills is a continuous journey, but with the right strategies, you can grow significantly. Here’s a structured approach to help you progress:\n\n---\n\n### **1. Master the Fundamentals**\n- **Learn the basics**: Ensure you understand core concepts like data types, control structures (loops, conditionals), functions, and data structures (arrays, lists, dictionaries).\n- **Study algorithms and problem-solving**: Practice solving problems using logic and algorithms (e.g., sorting, searching, recursion). Platforms like **LeetCode**, **HackerRank**, or **Codewars** are great for this.\n- **Understand the language’s syntax and idioms**: For your chosen language (e.g., Python, JavaScript, Java), learn its best practices and common patterns.\n\n---\n\n### **2. Practice Regularly**\n- **Write code daily**: Even 15–30 minutes a day can make a difference. Use coding challenges or build small projects.\n- **Debug and refactor**: Learn to identify and fix bugs. Refactor code to improve readability and efficiency.\n- **Use coding platforms**: Websites like **Exercism**, **CodinGame**, or **FreeCodeCamp** offer interactive exercises and projects.\n\n---\n\n### **3. Build Projects**\n- **Start with small projects**: Create tools, games, or utilities (e.g., a to-do list app, calculator, or weather app).\n- **Work on real-world problems**: Solve issues you encounter in daily life (e.g., automate file sorting, create a budget tracker).\n- **Contribute to open source**: Find beginner-friendly projects on **GitHub** and contribute fixes or features. This helps you learn collaboration and real-world coding practices.\n\n---\n\n### **4. Learn Advanced Topics**\n- **Explore frameworks and libraries**: Learn tools relevant to your goals (e.g., React for front-end, Django for back-end, or TensorFlow for AI).\n- **Study software design patterns**: Understand patterns like MVC, singleton, or observer to write scalable code.\n- **Learn about system architecture**: For larger projects, study how to design systems, databases, and APIs.\n\n---\n\n### **5. Collaborate and Learn from Others**\n- **Join coding communities**: Participate in forums like **Stack Overflow**, **Reddit (r/learnprogramming)**, or Discord groups.\n- **Code reviews**: Ask others to review your code (or review theirs) to learn best practices and spot improvements.\n- **Pair programming**: Work with others to solve problems collaboratively, which can expose you to new ideas and techniques.\n\n---\n\n### **6. Stay Updated and Curious**\n- **Follow industry trends**: Read blogs, newsletters, or podcasts like **The Changelog**, **Software Engineering Daily**, or **Dev.to**.\n- **Experiment with new technologies**: Try tools, languages, or frameworks that interest you (e.g., AI, blockchain, or web3).\n- **Attend workshops or courses**: Platforms like **Coursera**, **Udemy**, or **Pluralsight** offer structured learning paths.\n\n---\n\n### **7. Document and Reflect**\n- **Write explanations**: Teach others by summarizing what you’ve learned (e.g., write blog posts or explain concepts to a friend).\n- **Track progress**: Use tools like **GitHub**, **Notion**, or a journal to document your projects and growth.\n- **Review mistakes**: Analyze errors to avoid repeating them and improve your understanding.\n\n---\n\n### **8. 
Focus on Soft Skills**\n- **Communication**: Practice explaining your code and ideas clearly, especially when working in teams.\n- **Time management**: Set goals (e.g., \"Learn Python in 3 months\") and break tasks into manageable steps.\n- **Resilience**: Embrace failure as part of the learning process. Debugging and fixing bugs are essential skills.\n\n---\n\n### **Resources to Explore**\n- **Books**: *Clean Code* by Robert C. Martin, *The Pragmatic Programmer*, *Grokking Algorithms*.\n- **Courses**: FreeCodeCamp, Coursera, edX, or YouTube tutorials (e.g., Traversy Media, The Net Ninja).\n- **Tools**: Git for version control, VS Code or PyCharm for IDEs, and Docker for containerization.\n\n---\n\n### **Final Tips**\n- **Be patient**: Mastery takes time. Celebrate small wins (e.g., fixing a bug or completing a project).\n- **Stay consistent**: Even 10–15 minutes of daily practice can lead to significant growth over time.\n- **Ask questions**: Don’t hesitate to seek help when stuckβ€”others are often happy to share knowledge.\n\nBy combining theory, practice, and curiosity, you’ll steadily improve your skills and become a more confident, versatile programmer. πŸš€", + "timestamp": "2025-10-25T19:31:22.276254+00:00" + }, + { + "role": "user", + "content": "What is the meaning of life?", + "timestamp": "2025-10-26T06:22:32.837799+00:00" + }, + { + "role": "assistant", + "content": "\nOkay, the user asked, \"What is the meaning of life?\" That's a deep and philosophical question. I need to approach this carefully. First, I should acknowledge that there's no single answer. Different people and cultures have various perspectives.\n\nI should mention major philosophical viewpoints. Existentialism comes to mind, like Sartre and Camus, who say life has no inherent meaning, but we create our own. Then there's religious perspectives, such as the Abrahamic faiths where the purpose is to follow divine will. Eastern philosophies like Buddhism and Hinduism might focus on enlightenment or karma. \n\nAlso, scientific views could be included, like the idea that life's purpose is to survive and reproduce, but that's more biological. I should make sure to present these ideas without bias, as the user might be looking for a comprehensive overview.\n\nI need to keep the tone respectful and open-ended, encouraging the user to reflect on their own beliefs. Maybe end with a question to prompt further thought. Let me structure this clearly, separating the different perspectives and keeping it concise.\n\n\nThe question of the meaning of life is one of humanity’s oldest and most profound mysteries. There is no single answer, as it depends on personal beliefs, cultural context, and philosophical perspectives. Here are some common viewpoints:\n\n---\n\n### **1. Philosophical Perspectives**\n- **Existentialism**: Suggests life has no inherent meaning, but individuals create their own purpose through choices, actions, and creativity (e.g., Jean-Paul Sartre, Albert Camus). \n- **Nihilism**: Posits that life lacks intrinsic meaning, purpose, or value. \n- **Stoicism**: Emphasizes living in harmony with nature, cultivating virtue, and accepting what cannot be controlled. \n- **Utilitarianism**: Seeks to maximize happiness and minimize suffering for the greatest number. \n\n---\n\n### **2. Religious and Spiritual Views**\n- **Abrahamic Faiths (Christianity, Islam, Judaism)**: Often emphasize serving a higher power, following divine commandments, and seeking salvation or union with the divine. 
\n- **Buddhism**: Focuses on overcoming suffering (dukkha) through enlightenment (nirvana) and compassion. \n- **Hinduism**: Views life as part of a cycle of rebirth (samsara), with the goal of achieving moksha (liberation from the cycle). \n- **Pantheism/Animism**: Sees the divine in nature and life itself, often emphasizing harmony with the universe. \n\n---\n\n### **3. Scientific and Secular Views**\n- **Biological Perspective**: Life’s purpose could be seen as survival, reproduction, and the continuation of the species. \n- **Cosmic Perspective**: Some argue life is a cosmic accident, and meaning is self-created through curiosity, exploration, and connection with others. \n- **Humanism**: Emphasizes human potential, ethics, and the pursuit of knowledge, happiness, and social progress. \n\n---\n\n### **4. Personal and Subjective Views**\n- For many, meaning arises from relationships, creativity, personal growth, or contributing to others. \n- Others find purpose in art, science, activism, or spiritual practices. \n\n---\n\n### **A Thoughtful Approach**\nUltimately, the meaning of life is a deeply personal question. It may involve: \n- **Exploring your values** and what brings you fulfillment. \n- **Engaging with art, nature, or philosophy** to reflect on larger questions. \n- **Embracing uncertainty** and finding joy in the journey itself. \n\nAs the poet Rumi wrote: *\"You are not a drop in the ocean. You are the entire ocean in a drop.\"* Perhaps the answer lies in discovering your own unique role in the universe. 🌌 \n\nWhat do you think? How do you approach this question? 😊", + "timestamp": "2025-10-26T06:22:51.836438+00:00" + }, + { + "role": "assistant", + "content": "New conversation started. Previous conversation saved.", + "timestamp": "2025-10-26T06:31:20.958140+00:00" + }, + { + "role": "assistant", + "content": "New conversation started. Previous conversation saved.", + "timestamp": "2025-10-26T06:33:49.654297+00:00" + }, + { + "role": "assistant", + "content": "New conversation started. Previous conversation saved.", + "timestamp": "2025-10-26T06:34:02.693113+00:00" + }, + { + "role": "user", + "content": "Hello there! What can you help me with?", + "timestamp": "2025-10-26T06:37:18.116891+00:00" + }, + { + "role": "assistant", + "content": "\nOkay, the user just said, \"Hello there! What can you help me with?\" I need to respond appropriately. Let me think about the context.\n\nFirst, the user has been asking questions about programming, the meaning of life, and other topics. They might be looking for assistance in various areas. Since they mentioned \"help me with,\" I should list the main areas I can assist with.\n\nI should cover general knowledge, programming, writing, and maybe other skills. But I need to keep it concise and friendly. Also, the user might be a student, a professional, or just curious. Offering a range of options makes sense.\n\nI should mention specific examples for each category to give them a clear idea. For instance, under programming, they can ask about coding problems or language syntax. For writing, they can get help with essays or stories. \n\nI should also invite them to ask any questions they have, showing openness. Maybe add an emoji to keep the tone friendly. Let me structure it step by step to ensure clarity and helpfulness.\n\n\nHello! 😊 I can help with a wide range of topics, including:\n\n### 🧠 **General Knowledge** \n- Answering questions about science, history, culture, or pop culture. \n- Explaining complex concepts in simple terms. 
\n\n### πŸ’» **Programming & Tech** \n- Debugging code, understanding syntax, or learning new languages (Python, JavaScript, etc.). \n- Explaining algorithms, software design, or tech trends. \n\n### ✍️ **Writing & Creativity** \n- Helping draft essays, stories, emails, or scripts. \n- Offering feedback on ideas or creative projects. \n\n### πŸ“š **Learning & Study** \n- Summarizing books, articles, or academic papers. \n- Creating study guides or practice problems. \n\n### πŸ€” **Life & Philosophy** \n- Discussing big questions (like the meaning of life!). \n- Offering advice on personal growth, goals, or decisions. \n\nFeel free to ask anythingβ€”no question is too small or silly! What would you like to explore? 🌟", + "timestamp": "2025-10-26T06:37:29.608604+00:00" + }, + { + "role": "assistant", + "content": "New conversation started. Previous conversation archived.", + "timestamp": "2025-10-26T06:37:35.693027+00:00" + } + ] +} \ No newline at end of file diff --git a/data/conversations/archive_20251026_064414_f8639758.json b/data/conversations/archive_20251026_064414_f8639758.json new file mode 100644 index 0000000..1b677d5 --- /dev/null +++ b/data/conversations/archive_20251026_064414_f8639758.json @@ -0,0 +1,23 @@ +{ + "id": "archive_20251026_064414_f8639758", + "title": "", + "created_at": "2025-10-26T06:43:14.550171+00:00", + "updated_at": "2025-10-26T06:43:47.212458+00:00", + "messages": [ + { + "role": "assistant", + "content": "New conversation started. Previous conversation archived.", + "timestamp": "2025-10-26T06:43:14.554066+00:00" + }, + { + "role": "user", + "content": "Whats up?", + "timestamp": "2025-10-26T06:43:44.177353+00:00" + }, + { + "role": "assistant", + "content": "\nOkay, the user just said \"Whats up?\" which is a casual greeting. I need to respond in a friendly and open manner. Since they're starting a new conversation, I should keep it light and inviting. Maybe ask how they're doing or what they need help with. Let me make sure the tone is positive and approachable. I should avoid any formal language and keep it simple. Also, since the previous conversation was archived, there's no need to reference that. Just focus on the current interaction. Let me check for any typos and ensure the response flows naturally.\n\n\nHey there! Just checking in to see how you're doing. What's on your mind today? 😊 I'm here to help with anything you need!", + "timestamp": "2025-10-26T06:43:47.212458+00:00" + } + ] +} \ No newline at end of file diff --git a/data/conversations/archive_20251026_064712_da9eaa9a.json b/data/conversations/archive_20251026_064712_da9eaa9a.json new file mode 100644 index 0000000..02536b9 --- /dev/null +++ b/data/conversations/archive_20251026_064712_da9eaa9a.json @@ -0,0 +1,23 @@ +{ + "id": "archive_20251026_064712_da9eaa9a", + "title": "", + "created_at": "2025-10-26T06:44:14.844517+00:00", + "updated_at": "2025-10-26T06:47:01.787913+00:00", + "messages": [ + { + "role": "assistant", + "content": "New conversation started. Previous conversation archived.", + "timestamp": "2025-10-26T06:44:14.848276+00:00" + }, + { + "role": "user", + "content": "What can you program", + "timestamp": "2025-10-26T06:46:48.452468+00:00" + }, + { + "role": "assistant", + "content": "\nOkay, the user asked, \"What can you program?\" I need to explain the range of programming tasks I can handle. First, I should list the main areas like coding in various languages, web development, data analysis, machine learning, etc. 
But I should be clear that I can't execute code, only provide guidance.\n\nWait, the user might be a beginner, so I should explain in simple terms. Maybe start with the languages I support, like Python, JavaScript, Java, etc. Then mention specific tasks such as creating scripts, building websites, analyzing data, developing apps. Also, include AI-related tasks like NLP or image recognition.\n\nI should highlight that I can't run code but can help write it. Maybe add examples of common tasks to make it concrete. Also, mention collaboration, like helping debug or optimize code. Keep the tone friendly and encouraging. Make sure to invite them to ask for specific help if they have a project in mind. Avoid technical jargon where possible. Check for clarity and ensure all key points are covered without being too verbose.\n\n\nI can assist with a wide range of programming tasks and concepts! Here's what I can help with:\n\n### **Languages & Tools**\n- **Scripting**: Python, JavaScript, Bash, PowerShell, etc.\n- **Web Development**: HTML, CSS, JavaScript (frontend/backend), frameworks like React, Node.js, Django, etc.\n- **Data Science**: Python (Pandas, NumPy, Matplotlib), R, SQL, data visualization.\n- **Machine Learning/AI**: Python libraries (TensorFlow, PyTorch, scikit-learn), NLP, image processing, model training.\n- **Mobile/Web Apps**: React Native, Flutter, Flutter Web, etc.\n- **Game Development**: Unity (C#), Godot (GDScript), etc.\n- **Automation**: Scripting for repetitive tasks, API integrations, CI/CD pipelines.\n\n### **Tasks I Can Help With**\n1. **Code Writing/Debugging**: \n - Write code for specific tasks (e.g., \"Create a Python script to sort a list\"). \n - Fix errors in existing code. \n - Optimize performance or readability.\n\n2. **Project Planning**: \n - Design architecture for apps, websites, or systems. \n - Suggest tools/libraries for a project.\n\n3. **Tutorials & Explanations**: \n - Explain concepts (e.g., \"How does recursion work?\"). \n - Break down complex topics into simple steps. \n - Provide examples for learning.\n\n4. **Collaboration**: \n - Help brainstorm solutions to problems. \n - Review code logic or structure. \n - Suggest best practices (e.g., code style, security, scalability).\n\n5. **AI/ML Tasks**: \n - Build models for classification, regression, or NLP. \n - Analyze datasets and generate insights. \n - Deploy models using frameworks like Flask or FastAPI.\n\n### **Limitations**\n- I **can't execute code** or run programs directly. \n- I can't access external systems, files, or live data. \n- I provide **guidance and suggestions**, not guaranteed solutions.\n\n---\n\n**How can I help you?** \nIf you have a specific project, problem, or question (e.g., \"How to build a weather app in Python?\" or \"Fix this SQL query\"), feel free to ask! I’ll walk you through the steps. 😊", + "timestamp": "2025-10-26T06:47:01.787913+00:00" + } + ] +} \ No newline at end of file diff --git a/data/conversations/archive_20251026_064923_2820888b.json b/data/conversations/archive_20251026_064923_2820888b.json new file mode 100644 index 0000000..efc820f --- /dev/null +++ b/data/conversations/archive_20251026_064923_2820888b.json @@ -0,0 +1,23 @@ +{ + "id": "archive_20251026_064923_2820888b", + "title": "", + "created_at": "2025-10-26T06:47:12.732213+00:00", + "updated_at": "2025-10-26T06:49:05.777661+00:00", + "messages": [ + { + "role": "assistant", + "content": "New conversation started. 
Previous conversation archived.", + "timestamp": "2025-10-26T06:47:12.735915+00:00" + }, + { + "role": "user", + "content": "What is 5x5", + "timestamp": "2025-10-26T06:48:58.489714+00:00" + }, + { + "role": "assistant", + "content": "\nOkay, the user asked \"What is 5x5\". Let me think about how to approach this.\n\nFirst, I need to confirm what exactly they're asking. The question is straightforward, but maybe they want a simple multiplication result or something more. Since it's 5 multiplied by 5, the answer is 25. But maybe they're looking for a different interpretation? Like a 5x5 grid or matrix? But given the context, it's most likely a basic math question.\n\nI should start by providing the direct answer: 5 times 5 equals 25. Then, perhaps offer additional context in case they need more information. For example, if they're working on a math problem, they might need to know how to solve it step by step. Or if they're using it in a different context, like a 5x5 matrix in programming or a grid in design.\n\nWait, the user might be a student learning multiplication, so explaining the process could be helpful. But the question is very simple, so maybe they just need the answer. However, to be thorough, I should cover both the basic answer and possible extensions. Also, check if there's any ambiguity. For instance, in some contexts, 5x5 could refer to dimensions, like a 5x5 square, which has an area of 25 units. But again, without more context, the safest answer is 25.\n\nI should make sure the response is clear and concise. Start with the answer, then offer further explanation if needed. Maybe ask if they need more details. That way, the user gets the immediate answer and the option to explore more if necessary.\n\n\nThe result of 5 multiplied by 5 is **25**. \n\nIf you meant something else (like a 5x5 grid, matrix, or other context), feel free to clarify! 😊", + "timestamp": "2025-10-26T06:49:05.777661+00:00" + } + ] +} \ No newline at end of file diff --git a/data/conversations/archive_20251026_065520_f3077a13.json b/data/conversations/archive_20251026_065520_f3077a13.json new file mode 100644 index 0000000..0af4102 --- /dev/null +++ b/data/conversations/archive_20251026_065520_f3077a13.json @@ -0,0 +1,23 @@ +{ + "id": "archive_20251026_065520_f3077a13", + "title": "", + "created_at": "2025-10-26T06:49:23.714168+00:00", + "updated_at": "2025-10-26T06:55:07.152876+00:00", + "messages": [ + { + "role": "assistant", + "content": "New conversation started. Previous conversation archived.", + "timestamp": "2025-10-26T06:49:23.717948+00:00" + }, + { + "role": "user", + "content": "hi there im melvin", + "timestamp": "2025-10-26T06:55:03.874222+00:00" + }, + { + "role": "assistant", + "content": "\nOkay, the user introduced himself as Melvin. I need to respond in a friendly and welcoming manner. Let me start by acknowledging his name and offering help. Maybe ask how I can assist him. Keep it simple and open-ended so he feels comfortable sharing what he needs. Also, make sure to use proper grammar and a friendly tone. Let me put that together.\n\n\nHello, Melvin! Nice to meet you. How can I assist you today? 
😊", + "timestamp": "2025-10-26T06:55:07.152876+00:00" + } + ] +} \ No newline at end of file diff --git a/data/conversations/archive_20251026_070313_12321467.json b/data/conversations/archive_20251026_070313_12321467.json new file mode 100644 index 0000000..8579824 --- /dev/null +++ b/data/conversations/archive_20251026_070313_12321467.json @@ -0,0 +1,93 @@ +{ + "id": "archive_20251026_070313_12321467", + "title": "", + "created_at": "2025-10-26T06:55:20.586134+00:00", + "updated_at": "2025-10-26T07:03:10.783874+00:00", + "messages": [ + { + "role": "assistant", + "content": "New conversation started. Previous conversation archived.", + "timestamp": "2025-10-26T06:55:20.588351+00:00" + }, + { + "role": "user", + "content": "hello", + "timestamp": "2025-10-26T07:00:14.220192+00:00" + }, + { + "role": "assistant", + "content": "\nOkay, the user said \"hello\". I need to respond in a friendly and helpful manner. Since this is a new conversation, I should start by greeting them back and offering assistance. Maybe ask how I can help. Keep it simple and open-ended. Let me make sure the tone is positive and approachable. Alright, that should work.\n\n\nHello! How can I assist you today? 😊", + "timestamp": "2025-10-26T07:00:17.374525+00:00" + }, + { + "role": "user", + "content": "hello", + "timestamp": "2025-10-26T07:00:21.348214+00:00" + }, + { + "role": "assistant", + "content": "\nOkay, the user said \"hello\" again. I need to respond appropriately. Since they've already greeted me once, maybe they're testing or just being friendly. I should keep it welcoming but not too repetitive. Let me think of a friendly reply that invites them to ask for help. Maybe add an emoji to keep it light. Alright, \"Hello again! How can I assist you today? 😊\" That sounds good. It's friendly and open-ended.\n\n\nHello again! How can I assist you today? 😊", + "timestamp": "2025-10-26T07:00:23.823935+00:00" + }, + { + "role": "user", + "content": "test without reasoning", + "timestamp": "2025-10-26T07:00:45.340645+00:00" + }, + { + "role": "assistant", + "content": "\nOkay, the user just said \"test without reasoning.\" Let me think about how to respond. They might be looking for a quick test or a challenge without needing a detailed explanation. Since they've been saying \"hello\" twice and now this, maybe they're testing the AI's ability to handle different commands or just want a simple interaction.\n\nI should acknowledge their request and offer a straightforward test. Maybe ask them to provide a question or a task they want to test. Keeping it friendly and open-ended would encourage them to specify what they need. Also, since they mentioned \"without reasoning,\" I should avoid overcomplicating the response. Just a simple prompt to get them started. Let me make sure the reply is clear and inviting.\n\n\nSure! What would you like to test? 😊", + "timestamp": "2025-10-26T07:00:48.914505+00:00" + }, + { + "role": "user", + "content": "Test again", + "timestamp": "2025-10-26T07:01:06.523902+00:00" + }, + { + "role": "assistant", + "content": "\nOkay, the user said \"Test again.\" Let me think about how to respond.\n\nFirst, I need to understand what they mean by \"test again.\" They might be referring to a previous test or a different kind of test. Since the conversation started with \"hello\" and then \"test without reasoning,\" maybe they want to continue with a test but without the reasoning part. \n\nIn the previous interaction, the user asked to test without reasoning, and I asked what they wanted to test. 
Now they're saying \"Test again,\" so they might be looking for another test. But I should clarify to make sure I'm on the same page. \n\nI should respond in a friendly and open manner, asking what specific test they have in mind. That way, I can provide the right kind of assistance. Also, keeping the tone positive and helpful is important. \n\nMaybe they want a different type of test, like a quiz, a logic puzzle, or something else. I need to prompt them to specify so I can help effectively. Let me make sure my response is clear and invites them to explain what they need.\n\n\nSure! What kind of test would you like to take? 😊", + "timestamp": "2025-10-26T07:01:11.584794+00:00" + }, + { + "role": "user", + "content": "hello", + "timestamp": "2025-10-26T07:01:16.187153+00:00" + }, + { + "role": "assistant", + "content": "\nOkay, the user just said \"hello\" again. Let me check the conversation history.\n\nLooking back, the user started with \"hello,\" then repeated it a couple of times. The assistant responded each time with a friendly greeting and an offer to help. Then the user asked for a test, and the assistant asked what kind of test. The user said \"test again,\" and the assistant asked for more details. Now the user is back to \"hello.\"\n\nHmm, maybe the user is testing how the assistant responds to repeated greetings. Or perhaps they're just being polite and want to start a new conversation. Since the user has been asking for tests and then going back to greetings, maybe they're not sure what they want or are experimenting.\n\nThe assistant should keep the tone friendly and open. Since the user might be unsure, offering assistance again makes sense. Maybe the user is looking for help with something specific but hasn't clarified yet. The assistant should stay welcoming and ready to help with whatever the user needs, whether it's a test, a question, or just a chat.\n\n\nHello again! How can I assist you today? 😊", + "timestamp": "2025-10-26T07:01:21.024279+00:00" + }, + { + "role": "user", + "content": "Why do you think", + "timestamp": "2025-10-26T07:01:29.563091+00:00" + }, + { + "role": "assistant", + "content": "\nOkay, the user asked \"Why do you think...\" and I need to respond. Let me see. They might be asking for my opinion on something, but the question is incomplete. Maybe they meant to ask why I think something is a certain way. Since the conversation history shows they've been testing and saying hello, perhaps they're just testing how I respond to incomplete questions.\n\nI should acknowledge their question and prompt them to complete it. Keep it friendly and open-ended. Let them know I'm here to help once they provide more details. Make sure the response is encouraging and not too formal. Maybe add an emoji to keep the tone light. Yeah, that should work.\n\n\nHello! It seems like your question is incomplete. Could you clarify what you're asking? I'm here to help! 😊", + "timestamp": "2025-10-26T07:01:33.140307+00:00" + }, + { + "role": "user", + "content": "Test", + "timestamp": "2025-10-26T07:02:45.642647+00:00" + }, + { + "role": "assistant", + "content": "\nOkay, the user just said \"Test.\" Let me think about how to respond.\n\nFirst, the conversation history shows that the user has been asking for tests multiple times. They started with \"hello,\" then \"test without reasoning,\" \"Test again,\" and now \"Test.\" The assistant has been responding with prompts for more details each time. 
\n\nThe user might be looking for a specific type of test, but they haven't provided enough information. They could be testing the assistant's ability to handle repeated requests or maybe they're trying to see how the assistant responds to different inputs. \n\nSince the user hasn't specified the type of test, the assistant should ask for clarification. However, the user might be expecting a test in the form of a quiz or a challenge. Maybe they want to see if the assistant can generate a test or answer questions. \n\nThe assistant should remain helpful and open-ended. It's important to prompt the user to provide more details so that the assistant can offer the best possible assistance. The previous responses have been friendly and encouraging, so maintaining that tone is key. \n\nI should respond by asking the user to specify the type of test they're interested in. That way, the assistant can tailor the response appropriately. It's also good to keep the conversation engaging and positive.\n\n\nSure! What type of test would you like to take? 😊 (e.g., a quiz, a logic puzzle, a trivia game, or something else?)", + "timestamp": "2025-10-26T07:02:51.221636+00:00" + }, + { + "role": "user", + "content": "Test", + "timestamp": "2025-10-26T07:03:04.498780+00:00" + }, + { + "role": "assistant", + "content": "\nOkay, the user just said \"Test\" again. Let me check the conversation history to understand the context better.\n\nThe user started with \"hello,\" then repeated it a few times. They asked \"Why do you think\" and then \"Test\" again. The assistant has been responding with prompts to clarify what kind of test they want. Now, the user is just saying \"Test\" again, so they might be expecting a specific test or quiz. \n\nSince the user hasn't provided any details about the type of test, the assistant should continue to ask for clarification. Maybe the user is testing the assistant's ability to handle repeated prompts or is looking for a specific kind of test they haven't mentioned yet. \n\nI need to make sure the response is friendly and encourages the user to specify the type of test they want. It's possible they might be unsure themselves or just want to see the assistant's response. \n\nI should keep the tone positive and open-ended, offering examples of different tests to guide them. That way, the user can choose what they're interested in. \n\nAlso, considering the previous interactions, the user might be testing the assistant's patience or just looking for a simple response. So, the assistant should remain helpful and not get frustrated. \n\nIn summary, the best approach is to ask the user to specify the type of test they want, providing examples to help them decide. This keeps the conversation moving forward and ensures the user gets the assistance they need.\n\n\nSure! What type of test would you like to take? 😊 (e.g., a quiz, a logic puzzle, a trivia game, or something else?)", + "timestamp": "2025-10-26T07:03:10.783874+00:00" + } + ] +} \ No newline at end of file diff --git a/data/conversations/archive_20251026_070439_8a492c00.json b/data/conversations/archive_20251026_070439_8a492c00.json new file mode 100644 index 0000000..3ddfc6d --- /dev/null +++ b/data/conversations/archive_20251026_070439_8a492c00.json @@ -0,0 +1,43 @@ +{ + "id": "archive_20251026_070439_8a492c00", + "title": "", + "created_at": "2025-10-26T07:03:13.346566+00:00", + "updated_at": "2025-10-26T07:04:31.711614+00:00", + "messages": [ + { + "role": "assistant", + "content": "New conversation started. 
Previous conversation archived.", + "timestamp": "2025-10-26T07:03:13.350732+00:00" + }, + { + "role": "user", + "content": "Think", + "timestamp": "2025-10-26T07:03:21.818846+00:00" + }, + { + "role": "assistant", + "content": "\nOkay, the user just said \"Think.\" I need to figure out how to respond appropriately. Since they didn't provide a specific question or topic, I should prompt them to clarify what they need help with. Maybe they're trying to get me to think about something, but without more context, it's hard to know. I should keep the response friendly and open-ended to encourage them to provide more details. Let me make sure I'm not making assumptions and that I'm inviting them to specify their request.\n\n\nSure! Could you clarify what you're thinking about or what you'd like help with? I'm here to assist with any questions or topics you'd like to explore. 😊", + "timestamp": "2025-10-26T07:03:24.598739+00:00" + }, + { + "role": "user", + "content": "Dont think", + "timestamp": "2025-10-26T07:03:28.434716+00:00" + }, + { + "role": "assistant", + "content": "\nOkay, the user said \"Dont think\" after I asked them to clarify what they were thinking about. They might be indicating they don't want to think about something or are avoiding the topic. I need to respond in a way that's supportive and not pushy.\n\nFirst, I should acknowledge their response without forcing them to explain. Maybe offer alternative ways to help. Let them know I'm here if they're ready to talk. Keep the tone friendly and open-ended. Avoid any pressure to elaborate. Maybe use an emoji to keep it light. Let them know I'm available when they're comfortable. Make sure the response is concise and not too long. Check for any possible misunderstandings. Ensure the reply is encouraging and non-judgmental. Maybe suggest they can ask anything they're curious about. Keep the door open for them to reach out again. Avoid any assumptions about their feelings or thoughts. Just provide a safe space for them to engage if they want.\n\n\nNo problem! If you ever feel ready to chat, think, or explore something, I'm here to help. You've got this! 😊 Let me know how I can support you.", + "timestamp": "2025-10-26T07:03:32.966569+00:00" + }, + { + "role": "user", + "content": "test", + "timestamp": "2025-10-26T07:04:26.514192+00:00" + }, + { + "role": "assistant", + "content": "\nOkay, the user just said \"test\". That's pretty vague. I need to figure out what they're trying to test. Maybe they want to see how I respond to simple commands or if I can handle unexpected inputs.\n\nFirst, I should acknowledge their message. \"Test\" could be a way to check if I'm active or if I can process their input. I should respond in a friendly and open manner to encourage them to elaborate.\n\nI should keep the response light and inviting. Maybe add an emoji to keep the tone positive. Let them know I'm here to help with any specific tests they have in mind. That way, they feel supported and are more likely to provide more details if needed.\n\nAlso, since the conversation started with \"Think\" and then \"Dont think\", maybe they're testing different modes or responses. I should stay flexible and ready to assist regardless of their intent. Just make sure the reply is encouraging and open-ended.\n\n\nHey there! 😊 What kind of test are you thinking of? Whether it's a quick question, a fun challenge, or something else, I'm here to help! Let me know how I can assist. 
πŸš€", + "timestamp": "2025-10-26T07:04:31.711614+00:00" + } + ] +} \ No newline at end of file diff --git a/data/conversations/archive_20251026_070825_37517bf3.json b/data/conversations/archive_20251026_070825_37517bf3.json new file mode 100644 index 0000000..c0ed617 --- /dev/null +++ b/data/conversations/archive_20251026_070825_37517bf3.json @@ -0,0 +1,53 @@ +{ + "id": "archive_20251026_070825_37517bf3", + "title": "", + "created_at": "2025-10-26T07:04:39.162007+00:00", + "updated_at": "2025-10-26T07:08:22.567377+00:00", + "messages": [ + { + "role": "assistant", + "content": "New conversation started. Previous conversation archived.", + "timestamp": "2025-10-26T07:04:39.165911+00:00" + }, + { + "role": "user", + "content": "Hello", + "timestamp": "2025-10-26T07:05:56.033901+00:00" + }, + { + "role": "assistant", + "content": "\nOkay, the user said \"Hello\". I need to respond appropriately. Since it's a new conversation, I should greet them back and offer assistance. Let me keep it friendly and open-ended. Maybe ask how I can help them today. That should set a good tone for the interaction.\n\n\nHello! How can I assist you today? 😊", + "timestamp": "2025-10-26T07:05:57.548688+00:00" + }, + { + "role": "user", + "content": "TEst", + "timestamp": "2025-10-26T07:06:00.333836+00:00" + }, + { + "role": "assistant", + "content": "\nOkay, the user just sent \"TEst\". Let me think about how to respond. They might be testing if I'm active or if I can handle typos. Since the previous message was \"Hello\" and the response was friendly, maybe they want to continue the conversation.\n\nI should acknowledge their message in a friendly way. Maybe point out the typo and offer help. That way, I'm being helpful and also showing I'm attentive. Let me check if there's anything else they might need. They could be starting a new topic, so keeping the door open for further questions is good. Keep the tone positive and approachable. Alright, let's go with that.\n\n\nLooks like you might have meant to type something else! 😊 Let me know how I can assist youβ€”whether it's answering questions, helping with tasks, or just chatting. I'm here for you! 🌟", + "timestamp": "2025-10-26T07:06:03.951031+00:00" + }, + { + "role": "user", + "content": "Test", + "timestamp": "2025-10-26T07:06:10.689911+00:00" + }, + { + "role": "assistant", + "content": "\nOkay, the user just typed \"Test\" again. They might be testing the system or checking if I respond. I should acknowledge their message in a friendly way. Maybe add an emoji to keep it light. Let me make sure to invite them to ask anything they need help with. Keep the tone positive and helpful.\n\n\nHello again! 😊 It looks like you're testing me, but I'm here to help with anything you needβ€”whether it's answering questions, solving problems, or just having a chat. What can I assist you with today? 🌟", + "timestamp": "2025-10-26T07:06:13.042301+00:00" + }, + { + "role": "user", + "content": "Test", + "timestamp": "2025-10-26T07:08:20.033166+00:00" + }, + { + "role": "assistant", + "content": "\nOkay, the user keeps sending \"Test\" messages. Maybe they're checking if I'm responsive or testing my replies. I should acknowledge their messages in a friendly way. Let me make sure to keep the tone positive and open for them to ask for help. I'll respond with a cheerful message encouraging them to ask anything they need.\n\n\nHello again! 
😊 It seems like you're just testing me, but I'm here to help with anything you needβ€”whether it's answering questions, solving problems, or just having a chat. What can I assist you with today? 🌟", + "timestamp": "2025-10-26T07:08:22.567377+00:00" + } + ] +} \ No newline at end of file diff --git a/data/conversations/archive_20251026_071216_a2983057.json b/data/conversations/archive_20251026_071216_a2983057.json new file mode 100644 index 0000000..d810671 --- /dev/null +++ b/data/conversations/archive_20251026_071216_a2983057.json @@ -0,0 +1,33 @@ +{ + "id": "archive_20251026_071216_a2983057", + "title": "", + "created_at": "2025-10-26T07:08:25.864839+00:00", + "updated_at": "2025-10-26T07:08:58.830972+00:00", + "messages": [ + { + "role": "assistant", + "content": "New conversation started. Previous conversation archived.", + "timestamp": "2025-10-26T07:08:25.867211+00:00" + }, + { + "role": "user", + "content": "Hello there", + "timestamp": "2025-10-26T07:08:27.864602+00:00" + }, + { + "role": "assistant", + "content": "\n\n\nHello! How can I assist you today?", + "timestamp": "2025-10-26T07:08:28.248345+00:00" + }, + { + "role": "user", + "content": "What is the purpose of UNIX when UNIX-like works fine?", + "timestamp": "2025-10-26T07:08:51.440826+00:00" + }, + { + "role": "assistant", + "content": "\nOkay, the user is asking about the purpose of UNIX when UNIX-like systems work fine. Let me break this down. First, I need to understand what they mean by UNIX-like. They probably know that systems like Linux and macOS are UNIX-like, meaning they follow the UNIX philosophy but aren't actual UNIX systems.\n\nSo, why would someone still use UNIX? Maybe they're confused about the difference between UNIX and UNIX-like. I should explain that UNIX is a specific operating system developed by AT&T, while UNIX-like refers to systems that emulate UNIX's design principles. \n\nThe user might be thinking that since UNIX-like systems are functional, why bother with the original UNIX. I should highlight the historical context, like UNIX's role in developing modern operating systems. Also, mention that UNIX is still used in certain environments, like servers or specific applications, and that it's recognized by the IEEE for compliance.\n\nThey might also be interested in the standards and certifications. UNIX has strict standards, so systems that are certified as UNIX have certain guarantees. This could be important for enterprises or specific software that requires UNIX compliance.\n\nI should also note that while UNIX-like systems are more common and user-friendly, UNIX itself has its own niche. Maybe the user is looking for reasons why someone would prefer the original UNIX over its derivatives. I need to make sure the answer is clear, covering the distinction, historical significance, and current uses of UNIX.\n\n\nUNIX is a specific operating system developed by AT&T in the 1970s, known for its robust design, portability, and influence on modern computing. While UNIX-like systems (e.g., Linux, macOS) emulate UNIX's design principles, they are not officially UNIX. UNIX remains significant for its adherence to strict standards (e.g., IEEE POSIX), certification, and use in specialized environments (e.g., servers, embedded systems). 
Its legacy ensures compatibility, reliability, and a foundation for innovation in operating systems.", + "timestamp": "2025-10-26T07:08:58.830972+00:00" + } + ] +} \ No newline at end of file diff --git a/data/conversations/archive_20251026_071416_41dd825d.json b/data/conversations/archive_20251026_071416_41dd825d.json new file mode 100644 index 0000000..34c1420 --- /dev/null +++ b/data/conversations/archive_20251026_071416_41dd825d.json @@ -0,0 +1,33 @@ +{ + "id": "archive_20251026_071416_41dd825d", + "title": "", + "created_at": "2025-10-26T07:12:16.271041+00:00", + "updated_at": "2025-10-26T07:12:39.631264+00:00", + "messages": [ + { + "role": "assistant", + "content": "New conversation started. Previous conversation archived.", + "timestamp": "2025-10-26T07:12:16.275208+00:00" + }, + { + "role": "user", + "content": "What can you do?\\", + "timestamp": "2025-10-26T07:12:21.247841+00:00" + }, + { + "role": "assistant", + "content": "\nOkay, the user asked, \"What can you do?\" I need to explain my capabilities clearly. Let me start by outlining the main functions. First, I can answer questions on a wide range of topics. Then, I can help with writing tasks like essays or stories. Also, I can assist with coding and programming problems. Oh, and I can translate languages. Maybe I should mention creating content like articles or social media posts. Oh, and I can help with problem-solving in various fields. Wait, I should also include things like brainstorming ideas or providing explanations. Let me make sure I cover all the key areas without making it too long. Need to keep it friendly and approachable. Alright, that should cover it.\n\n\nI can assist with a wide range of tasks and topics! Here's what I can do:\n\n1. **Answer Questions**: Provide explanations, facts, or insights on almost any subject (science, history, technology, culture, etc.).\n2. **Creative Writing**: Help draft stories, poems, scripts, or even social media content.\n3. **Problem Solving**: Work through math problems, logic puzzles, or coding challenges.\n4. **Language Translation**: Translate text between languages (e.g., English to Spanish, Chinese to French).\n5. **Code Generation/Debugging**: Write or fix code in languages like Python, JavaScript, Java, etc.\n6. **Research Assistance**: Summarize articles, analyze data, or help with academic research.\n7. **Brainstorming**: Generate ideas for projects, business plans, or creative concepts.\n8. **Explanations**: Break down complex topics into simpler terms (e.g., explaining quantum physics or machine learning).\n9. **Personalized Help**: Tailor responses to your specific needs, whether it's learning a skill or planning a trip.\n\nLet me know what you need help with! 😊", + "timestamp": "2025-10-26T07:12:28.563768+00:00" + }, + { + "role": "user", + "content": "Again", + "timestamp": "2025-10-26T07:12:33.430733+00:00" + }, + { + "role": "assistant", + "content": "\nOkay, the user just said \"Again.\" Hmm, they might be referring to something I mentioned earlier. Let me check the conversation history.\n\nThe user first asked, \"What can you do?\" and I listed all the capabilities. Then they replied with \"Again.\" Maybe they want to go over the list again or ask for more details on a specific point. Alternatively, they might be asking for another example or a different task.\n\nI should respond in a friendly way, inviting them to clarify what they need. It's possible they're not sure which service they want or need more examples. 
I should keep the tone open and helpful, encouraging them to specify their request so I can assist better. Let me make sure to acknowledge their message and offer further help.\n\n\nAh, I see! 😊 If you're looking for a refresher on what I can do, here's a quick recap of my main capabilities: \n\n- **Answer questions** (any topic, from science to pop culture). \n- **Write stories, poems, or scripts** (creative writing). \n- **Solve problems** (math, logic, coding, etc.). \n- **Translate text** between languages. \n- **Generate or debug code** (Python, JavaScript, etc.). \n- **Help with research** or data analysis. \n- **Brainstorm ideas** for projects or plans. \n- **Explain complex topics** in simple terms. \n- **Customize help** to your specific needs. \n\nLet me know what you’d like to explore next! 🌟", + "timestamp": "2025-10-26T07:12:39.631264+00:00" + } + ] +} \ No newline at end of file diff --git a/data/conversations/archive_20251026_073127_5e8a9546.json b/data/conversations/archive_20251026_073127_5e8a9546.json new file mode 100644 index 0000000..3757767 --- /dev/null +++ b/data/conversations/archive_20251026_073127_5e8a9546.json @@ -0,0 +1,23 @@ +{ + "id": "archive_20251026_073127_5e8a9546", + "title": "", + "created_at": "2025-10-26T07:14:16.661307+00:00", + "updated_at": "2025-10-26T07:14:22.827962+00:00", + "messages": [ + { + "role": "assistant", + "content": "New conversation started. Previous conversation archived.", + "timestamp": "2025-10-26T07:14:16.663958+00:00" + }, + { + "role": "user", + "content": "Test", + "timestamp": "2025-10-26T07:14:20.485642+00:00" + }, + { + "role": "assistant", + "content": "\nOkay, the user just sent \"Test\". I need to respond appropriately. Since it's a simple test, maybe they're checking if I'm active or want to start a conversation. I should acknowledge their message and ask how I can assist them. Keep it friendly and open-ended. Let me make sure the response is clear and invites them to elaborate on what they need help with. Alright, that should cover it.\n\n\nHello! It seems like you're testing me. How can I assist you today? 😊", + "timestamp": "2025-10-26T07:14:22.827962+00:00" + } + ] +} \ No newline at end of file diff --git a/data/conversations/archive_20251026_073238_0462b871.json b/data/conversations/archive_20251026_073238_0462b871.json new file mode 100644 index 0000000..ba93bd2 --- /dev/null +++ b/data/conversations/archive_20251026_073238_0462b871.json @@ -0,0 +1,33 @@ +{ + "id": "archive_20251026_073238_0462b871", + "title": "", + "created_at": "2025-10-26T07:31:27.416134+00:00", + "updated_at": "2025-10-26T07:32:07.012478+00:00", + "messages": [ + { + "role": "assistant", + "content": "New conversation started. Previous conversation archived.", + "timestamp": "2025-10-26T07:31:27.418369+00:00" + }, + { + "role": "user", + "content": "Hello there!", + "timestamp": "2025-10-26T07:31:31.439628+00:00" + }, + { + "role": "assistant", + "content": "Hello! 🌟 How can I assist you today? 😊", + "timestamp": "2025-10-26T07:31:48.207066+00:00" + }, + { + "role": "user", + "content": "What can you do?", + "timestamp": "2025-10-26T07:32:01.831359+00:00" + }, + { + "role": "assistant", + "content": "Hi there! πŸ‘‹ I'm really excited to tell you what I can do! πŸš€\n\nI'm here to help with all sorts of thingsβ€”whether you're looking for:\n\nπŸ”Ή **Answering questions** – From science and history to tech and pop culture! \nπŸ”Ή **Creative writing** – Help you write stories, poems, scripts, or even emails. 
\nπŸ”Ή **Problem-solving** – Math, logic puzzles, coding challenges, or planning strategies. \nπŸ”Ή **Learning & studying** – Summarize topics, explain concepts, or help with homework. \nπŸ”Ή **Language practice** – Translate, write in different languages, or improve your skills. \nπŸ”Ή **Programming help** – Code suggestions, debugging, or learning new languages. \nπŸ”Ή **Daily tasks** – Make to-do lists, organize schedules, or brainstorm ideas. \nπŸ”Ή **Emotional support** – Just chat, share thoughts, or talk through anything on your mind (with care and empathy!).\n\nI'm not perfectβ€”no magic wandβ€”but I'm always here to help you think, create, and explore. πŸ’‘\n\nSo what would you like to work on today? 😊✨", + "timestamp": "2025-10-26T07:32:07.012478+00:00" + } + ] +} \ No newline at end of file diff --git a/data/conversations/archive_20251026_073517_b1adec2f.json b/data/conversations/archive_20251026_073517_b1adec2f.json new file mode 100644 index 0000000..e69dfc9 --- /dev/null +++ b/data/conversations/archive_20251026_073517_b1adec2f.json @@ -0,0 +1,53 @@ +{ + "id": "archive_20251026_073517_b1adec2f", + "title": "", + "created_at": "2025-10-26T07:32:38.950851+00:00", + "updated_at": "2025-10-26T07:34:22.390388+00:00", + "messages": [ + { + "role": "assistant", + "content": "New conversation started. Previous conversation archived.", + "timestamp": "2025-10-26T07:32:38.955297+00:00" + }, + { + "role": "user", + "content": "How are you doing today?", + "timestamp": "2025-10-26T07:32:45.446826+00:00" + }, + { + "role": "assistant", + "content": "I'm doing great! 😊 I'm always here to help with whatever you needβ€”whether it's answering questions, brainstorming ideas, or just chatting. How can I assist you today?", + "timestamp": "2025-10-26T07:32:53.923968+00:00" + }, + { + "role": "user", + "content": "What? /think", + "timestamp": "2025-10-26T07:33:08.742179+00:00" + }, + { + "role": "assistant", + "content": "Ah, I see! πŸ˜„ You're probably thinking: *\"What? /think\"* β€” maybe you meant to say **\"What? I think...\"** or **\"What? / think [something]\"**? \n\nNo worries at all! I'm here to help you clarify. Could you tell me: \n- What you were trying to say? \n- Or what you’d like to *think* about? (Like a question, idea, or just a quick thought?) \n\nJust say the words, and I’ll help you work through it β€” no pressure, no judgment! πŸ’‘ \nWhat’s on your mind? 🌟", + "timestamp": "2025-10-26T07:33:18.998440+00:00" + }, + { + "role": "user", + "content": "Hello there", + "timestamp": "2025-10-26T07:33:55.741794+00:00" + }, + { + "role": "assistant", + "content": "Hello! 😊 So glad you're hereβ€”hope your day is bright and full of good vibes. What's on your mind today? I'm all ears! 🌟", + "timestamp": "2025-10-26T07:34:04.592844+00:00" + }, + { + "role": "user", + "content": "Was", + "timestamp": "2025-10-26T07:34:12.781334+00:00" + }, + { + "role": "assistant", + "content": "Ah, \"Was\" β€” that’s a fun little fragment! πŸ˜„ \nIs this part of a sentence you were thinking of? Like: \n- *\"Was that helpful?\"* \n- *\"Was this what you meant?\"* \n- Or just a quick thought in the middle of something? \n\nNo worries at all β€” I’m here to help you untangle it! Just say what’s on your mind, and I’ll match you there. πŸ’¬βœ¨ \n\nWhat’s the *full* thought behind \"Was\"? 
😊", + "timestamp": "2025-10-26T07:34:22.390388+00:00" + } + ] +} \ No newline at end of file diff --git a/data/conversations/archive_20251026_074254_7e273f2b.json b/data/conversations/archive_20251026_074254_7e273f2b.json new file mode 100644 index 0000000..ea5ea38 --- /dev/null +++ b/data/conversations/archive_20251026_074254_7e273f2b.json @@ -0,0 +1,83 @@ +{ + "id": "archive_20251026_074254_7e273f2b", + "title": "", + "created_at": "2025-10-26T07:35:17.692597+00:00", + "updated_at": "2025-10-26T07:42:49.146793+00:00", + "messages": [ + { + "role": "assistant", + "content": "New conversation started. Previous conversation archived.", + "timestamp": "2025-10-26T07:35:17.695093+00:00" + }, + { + "role": "user", + "content": "Hello", + "timestamp": "2025-10-26T07:35:21.460195+00:00" + }, + { + "role": "assistant", + "content": "Hello! How can I assist you today? 😊", + "timestamp": "2025-10-26T07:35:25.515467+00:00" + }, + { + "role": "user", + "content": "Ask a questionHello", + "timestamp": "2025-10-26T07:35:30.996436+00:00" + }, + { + "role": "assistant", + "content": "What's something you'd like to learn about today? 😊", + "timestamp": "2025-10-26T07:35:47.215863+00:00" + }, + { + "role": "user", + "content": "about python", + "timestamp": "2025-10-26T07:35:53.243737+00:00" + }, + { + "role": "assistant", + "content": "Great! Python is a super popular programming language that's easy to learn and super versatile. 😊 \n\n**Here's a quick question to help me tailor my answer:** \n> *What aspect of Python are you curious about?* \n*(For example: beginner basics, web development, data science, AI, automation, or something else?)* \n\nJust tell me what interests you, and I’ll give you a clear, friendly explanation! 🐍", + "timestamp": "2025-10-26T07:36:02.475419+00:00" + }, + { + "role": "user", + "content": "test", + "timestamp": "2025-10-26T07:36:07.763701+00:00" + }, + { + "role": "assistant", + "content": "Hmm, \"test\" is a great start! 😊 \n\nSince you said \"test,\" here are a few possible directions we could go β€” just pick what interests you most:\n\n1. **Python Testing (like unit testing)** – How to write tests for your code using `unittest` or `pytest`. \n2. **Python Test Environment** – How to set up a testing environment or run scripts. \n3. **Test a Python Script** – Want to see a simple example of running a test? \n4. **\"Test\" as in Python's `test` module** – Like how Python handles testing internally. \n\nOr maybe you just meant \"test\" as in \"try it out\"? \n\nπŸ‘‰ Could you clarify what you're really asking about? I'm here to help! πŸš€", + "timestamp": "2025-10-26T07:36:12.983291+00:00" + }, + { + "role": "user", + "content": "Think please", + "timestamp": "2025-10-26T07:36:18.387358+00:00" + }, + { + "role": "assistant", + "content": "Got it! πŸ˜„ I'll *think through* something practical for you based on your \"test\" interest and keep it super simple. \n\n**Here’s what I’ll do:** \nI’ll write a **one-liner Python test** you can run *right now* in your terminal (no setup needed). It’s the simplest test possible β€” and it’ll prove I’m thinking! \n\n```python\nprint(\"Hello, I’m thinking for you!\") # πŸ‘€ This is a test!\n```\n\n**Run it** in your terminal: \n```bash\npython -c \"print('Hello, I’m thinking for you!')\"\n```\n\nβœ… **Why this works**: \n- It’s a *real* test (it checks if the code runs without errors). \n- It’s *instant* (no dependencies, no installation). \n- It’s *fun* (it literally says \"I’m thinking for you!\"). \n\nYou’re welcome! 
🐍✨ \nWhat would you like to try next? 😊", + "timestamp": "2025-10-26T07:36:33.374805+00:00" + }, + { + "role": "user", + "content": "hello", + "timestamp": "2025-10-26T07:38:30.307131+00:00" + }, + { + "role": "assistant", + "content": "Hello again! 😊 It’s great to see you back. \n\nI’m here to help with **any** Python questionβ€”whether it’s a quick example, a concept you’re confused about, or just something fun to try. \n\nWhat’s on your mind today? 🐍✨ \n\n*(P.S. You can even say \"test\" again if you want to run another tiny Python experiment!)*", + "timestamp": "2025-10-26T07:38:38.807487+00:00" + }, + { + "role": "user", + "content": "Hello", + "timestamp": "2025-10-26T07:42:41.121466+00:00" + }, + { + "role": "assistant", + "content": "Hello! 🌟 Just a little hello to brighten your day. 😊 \n\nWhat would you like to explore or try today? (Python, a fun fact, a quick code snippet, or just a chat?) I'm all ears! πŸ’¬", + "timestamp": "2025-10-26T07:42:49.146793+00:00" + } + ] +} \ No newline at end of file diff --git a/data/conversations/archive_20251026_074828_f11452f2.json b/data/conversations/archive_20251026_074828_f11452f2.json new file mode 100644 index 0000000..3e0fe42 --- /dev/null +++ b/data/conversations/archive_20251026_074828_f11452f2.json @@ -0,0 +1,23 @@ +{ + "id": "archive_20251026_074828_f11452f2", + "title": "", + "created_at": "2025-10-26T07:42:54.664856+00:00", + "updated_at": "2025-10-26T07:43:03.637045+00:00", + "messages": [ + { + "role": "assistant", + "content": "New conversation started. Previous conversation archived.", + "timestamp": "2025-10-26T07:42:54.668757+00:00" + }, + { + "role": "user", + "content": "Hello", + "timestamp": "2025-10-26T07:42:59.073389+00:00" + }, + { + "role": "assistant", + "content": "Hello! How can I assist you today? 😊", + "timestamp": "2025-10-26T07:43:03.637045+00:00" + } + ] +} \ No newline at end of file diff --git a/data/conversations/archive_20251026_075259_179ebacd.json b/data/conversations/archive_20251026_075259_179ebacd.json new file mode 100644 index 0000000..92a9306 --- /dev/null +++ b/data/conversations/archive_20251026_075259_179ebacd.json @@ -0,0 +1,43 @@ +{ + "id": "archive_20251026_075259_179ebacd", + "title": "", + "created_at": "2025-10-26T07:48:28.269692+00:00", + "updated_at": "2025-10-26T07:52:43.226134+00:00", + "messages": [ + { + "role": "assistant", + "content": "New conversation started. Previous conversation archived.", + "timestamp": "2025-10-26T07:48:28.271652+00:00" + }, + { + "role": "user", + "content": "What can you do?", + "timestamp": "2025-10-26T07:48:31.685639+00:00" + }, + { + "role": "assistant", + "content": "Hey there! πŸ‘‹ I'm Qwen, and I'm really excited to help you out! Here's what I can do for you:\n\n- ✨ **Answer questions** (science, tech, history, you name it!)\n- πŸ“ **Write stories, emails, scripts, and even poems** - just tell me what you need!\n- πŸ’‘ **Do logical reasoning** and solve puzzles\n- 🧠 **Explain complex topics** in simple terms\n- πŸ’» **Write and debug code** in many programming languages\n- 🌍 **Translate languages** (I know a lot of them!)\n- 🎲 **Play games** with you (riddles, word games, etc.)\n- πŸ€” **Brainstorm ideas** for projects, businesses, or creative stuff\n\nI'm here to help with anything you need! What would you like to try today? 😊", + "timestamp": "2025-10-26T07:48:45.333325+00:00" + }, + { + "role": "user", + "content": "Hello tere", + "timestamp": "2025-10-26T07:49:06.605433+00:00" + }, + { + "role": "assistant", + "content": "Hello there! 
😊 How can I help you today?", + "timestamp": "2025-10-26T07:49:18.385029+00:00" + }, + { + "role": "user", + "content": "Test", + "timestamp": "2025-10-26T07:52:37.259353+00:00" + }, + { + "role": "assistant", + "content": "Test? πŸ˜„ Let me know how you'd like to test me! A question? A story? A game? I'm all ears!", + "timestamp": "2025-10-26T07:52:43.226134+00:00" + } + ] +} \ No newline at end of file diff --git a/data/conversations/archive_20251026_075756_53b92057.json b/data/conversations/archive_20251026_075756_53b92057.json new file mode 100644 index 0000000..a5be1b8 --- /dev/null +++ b/data/conversations/archive_20251026_075756_53b92057.json @@ -0,0 +1,43 @@ +{ + "id": "archive_20251026_075756_53b92057", + "title": "", + "created_at": "2025-10-26T07:52:59.714612+00:00", + "updated_at": "2025-10-26T07:55:49.231096+00:00", + "messages": [ + { + "role": "assistant", + "content": "New conversation started. Previous conversation archived.", + "timestamp": "2025-10-26T07:52:59.716843+00:00" + }, + { + "role": "user", + "content": "How are you", + "timestamp": "2025-10-26T07:53:03.474599+00:00" + }, + { + "role": "assistant", + "content": "I'm doing well, thank you! I'm here to help with any questions or tasks you might have. How can I assist you today? 😊", + "timestamp": "2025-10-26T07:53:10.115769+00:00" + }, + { + "role": "user", + "content": "Hello there", + "timestamp": "2025-10-26T07:55:19.057072+00:00" + }, + { + "role": "assistant", + "content": "Hello! 😊 It's great to see you again. How can I help you today?", + "timestamp": "2025-10-26T07:55:21.666512+00:00" + }, + { + "role": "user", + "content": "What is the purpose of python", + "timestamp": "2025-10-26T07:55:29.257049+00:00" + }, + { + "role": "assistant", + "content": "That's a great question! 😊 **Python's primary purpose** is to be a **versatile, high-level programming language** designed for **readability, simplicity, and productivity**. Here’s a clear breakdown of its key purposes:\n\n---\n\n### 🌟 Core Purposes of Python:\n1. **General-Purpose Programming** \n Python can solve problems across *any* domain: \n β†’ Web development (Django, Flask) \n β†’ Data science & AI (Pandas, TensorFlow, PyTorch) \n β†’ Automation (scripting tasks, file handling) \n β†’ Scientific computing (NumPy, SciPy) \n β†’ Education (easy syntax for beginners) \n β†’ Cloud services (AWS, Google Cloud) \n β†’ And more! \n\n2. **Readability & Simplicity** \n Python uses clean, human-readable syntax (e.g., `print(\"Hello\")` instead of `printf(\"Hello\");`). This makes it: \n β†’ Easy to learn for beginners \n β†’ Faster to write and debug \n β†’ Great for collaborative projects \n\n3. **Community & Ecosystem** \n Python has **millions of libraries** (like `requests` for APIs, `matplotlib` for data visualization) that let developers solve complex problems *without* reinventing the wheel. \n\n4. **Cross-Platform Compatibility** \n Python runs on Windows, macOS, Linux, and even embedded systems β€” no extra setup needed!\n\n---\n\n### πŸ’‘ Why Python Stands Out:\n- **\"Batteries included\"**: Comes with built-in tools for common tasks (e.g., file I/O, networking). \n- **Used by giants**: Google, Netflix, Spotify, NASA, and major tech companies rely on Python. \n- **Not just for beginners**: Professionals use it for AI/ML, big data, and enterprise apps.\n\n---\n\n### 🚫 Common Misconceptions:\n- ❌ *\"Python is only for beginners.\"* \n β†’ **Truth**: It’s used by experts for complex systems (e.g., AI models, financial trading). 
\n- ❌ *\"Python is slow.\"* \n β†’ **Truth**: It’s fast *for its simplicity* and often outperforms other languages in specific tasks (like data analysis).\n\n---\n\n### In a nutshell: \n> **Python exists to let developers write clear, efficient code quickly β€” whether they’re building a simple script, a web app, or an AI model.** \n\nIf you’re curious about a specific use case (like \"How do I use Python for data analysis?\" or \"Can Python make websites?\"), just ask! 😊 I’d love to help you dive deeper. What interests you most?", + "timestamp": "2025-10-26T07:55:49.231096+00:00" + } + ] +} \ No newline at end of file diff --git a/data/conversations/archive_20251026_080206_48becd70.json b/data/conversations/archive_20251026_080206_48becd70.json new file mode 100644 index 0000000..5d93367 --- /dev/null +++ b/data/conversations/archive_20251026_080206_48becd70.json @@ -0,0 +1,23 @@ +{ + "id": "archive_20251026_080206_48becd70", + "title": "", + "created_at": "2025-10-26T07:57:56.655186+00:00", + "updated_at": "2025-10-26T07:58:54.666964+00:00", + "messages": [ + { + "role": "assistant", + "content": "New conversation started. Previous conversation archived.", + "timestamp": "2025-10-26T07:57:56.659242+00:00" + }, + { + "role": "user", + "content": "Hello there", + "timestamp": "2025-10-26T07:58:47.110776+00:00" + }, + { + "role": "assistant", + "content": "Okay, the user said \"Hello there\". That's a simple greeting. I need to respond warmly and professionally. Let me think about the best way to start.\n\nFirst, I should acknowledge their greeting. Maybe say something like \"Hello! I'm glad you're here.\" That's friendly and opens the door for further conversation.\n\nI should check if they need help with anything. People often greet and then have a question or need assistance. So adding a question like \"How can I assist you today?\" would be good.\n\nWait, the user might be testing if I'm a real assistant. I should make sure my response is clear and not too robotic. Avoid using markdown, keep it conversational.\n\nAlso, since it's a new conversation, I don't have any context from previous messages. Just need to be straightforward.\n\nLet me put it all together: \"Hello! I'm glad you're here. How can I assist you today?\" That sounds natural and helpful.\n\nHmm, maybe add a smiley to make it more friendly? Like \"😊\". But the user didn't specify, so maybe better to keep it simple. Wait, the instructions say not to use markdown, so emojis might be okay if they're plain. Let me check previous examples. In the problem statement, they mentioned not to use markdown, but emojis are usually allowed in text. I'll include a smiley to add warmth.\n\nWait, the user's message is \"Hello there\" which is a bit casual. So a friendly response with a smiley is appropriate.\n\nSo final response: \"Hello! 😊 How can I assist you today?\"\n\nYes, that's good. It's friendly, clear, and invites them to ask for help.\n\nHello! 
😊 How can I assist you today?", + "timestamp": "2025-10-26T07:58:54.666964+00:00" + } + ] +} \ No newline at end of file diff --git a/data/conversations/archive_20251026_080452_8beb9b71.json b/data/conversations/archive_20251026_080452_8beb9b71.json new file mode 100644 index 0000000..038abd2 --- /dev/null +++ b/data/conversations/archive_20251026_080452_8beb9b71.json @@ -0,0 +1,23 @@ +{ + "id": "archive_20251026_080452_8beb9b71", + "title": "", + "created_at": "2025-10-26T08:02:06.172327+00:00", + "updated_at": "2025-10-26T08:03:07.103894+00:00", + "messages": [ + { + "role": "assistant", + "content": "New conversation started. Previous conversation archived.", + "timestamp": "2025-10-26T08:02:06.175348+00:00" + }, + { + "role": "user", + "content": "Hello there", + "timestamp": "2025-10-26T08:03:02.867925+00:00" + }, + { + "role": "assistant", + "content": "Okay, the user said \"Hello there\". That's a simple greeting. I need to respond in a friendly and welcoming way.\n\nFirst, I should acknowledge their greeting. Maybe add a smiley to keep it warm. Let me think... \"Hello! 😊 How can I help you today?\" That sounds good. It's open-ended and invites them to ask questions or share what they need.\n\nWait, the user might be testing if I'm working or just starting a conversation. Since they said \"New conversation started. Previous conversation archived.\", I should make sure my response is clear and not too robotic.\n\nI should avoid any markdown formatting as per the instructions. Just plain text with a smiley. Let me check: \"Hello! 😊 How can I help you today?\" Yes, that's straightforward and friendly.\n\nIs there anything else they might need? Maybe they want to know about my capabilities, but since they just said hello, it's better to keep it simple. Don't overload them with info right away.\n\nI think that's a good response. Let me write it out.\n\nHello! 😊 How can I help you today?", + "timestamp": "2025-10-26T08:03:07.103894+00:00" + } + ] +} \ No newline at end of file diff --git a/data/conversations/default.json b/data/conversations/default.json new file mode 100644 index 0000000..e989fee --- /dev/null +++ b/data/conversations/default.json @@ -0,0 +1,22 @@ +{ + "id": "default", + "created_at": "2025-10-26T08:04:52.666706+00:00", + "updated_at": "2025-10-26T08:05:01.598962+00:00", + "messages": [ + { + "role": "assistant", + "content": "New conversation started. Previous conversation archived.", + "timestamp": "2025-10-26T08:04:52.669101+00:00" + }, + { + "role": "user", + "content": "Hello there", + "timestamp": "2025-10-26T08:04:54.458503+00:00" + }, + { + "role": "assistant", + "content": "Okay, the user said \"Hello there\". That's a friendly greeting. I should respond in a warm and welcoming way. Let me think about how to phrase it.\n\nFirst, I want to acknowledge their greeting. Maybe something like \"Hello! πŸ‘‹ How can I assist you today?\" That's friendly and open-ended. Adding an emoji might make it more approachable.\n\nWait, the user might be testing if I'm responsive. Since it's a new conversation, I should make sure to be clear and helpful. Let me check if there's any specific context I need to consider. The previous conversation is archived, so no prior context.\n\nI should avoid any technical jargon. Keep it simple and conversational. Maybe add a question to prompt them to tell me what they need. Like \"What would you like to discuss or work on today?\" Hmm, but the user might not know what they need yet. 
The first response should be open-ended.\n\nLet me see examples. In many chatbots, they start with a greeting and a question. For example: \"Hi there! How can I help you today?\" That's good. Adding a smiley emoji could be nice. Let me go with \"Hello! 😊 How can I assist you today?\"\n\nWait, the user said \"Hello there\", so maybe a slightly more casual tone. \"Hello there! πŸ‘‹ How can I help you today?\" Yes, that's good. It's friendly and matches their phrasing.\n\nI should avoid being too formal. Keep it natural. Let me make sure there's no typo. \"Hello there!\" with an exclamation mark. The emoji is a smiley, which is common in such contexts.\n\nI think that's a good response. It's warm, acknowledges their greeting, and invites them to share what they need. No need for extra information since it's the first message. Alright, that should work.\n\nHello there! πŸ‘‹ How can I assist you today? 😊", + "timestamp": "2025-10-26T08:05:01.598962+00:00" + } + ] +} \ No newline at end of file diff --git a/inspiration/quickcenter/__init__.py b/inspiration/quickcenter/__init__.py deleted file mode 100644 index aae32ed..0000000 --- a/inspiration/quickcenter/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from .quickcenter import QuickCenter - -__all__ = [ - "QuickCenter", -] \ No newline at end of file diff --git a/inspiration/quickcenter/quickcenter.py b/inspiration/quickcenter/quickcenter.py deleted file mode 100644 index 1576336..0000000 --- a/inspiration/quickcenter/quickcenter.py +++ /dev/null @@ -1,150 +0,0 @@ -from ignis import widgets -from ignis.window_manager import WindowManager -from ignis.services.notifications import NotificationService -from modules.m3components import Button -from .widgets import NotificationCenter, QuickSliders -from user_settings import user_settings -from ignis.services.niri import NiriService - -window_manager = WindowManager.get_default() -notifications = NotificationService.get_default() - - -class QuickCenter(widgets.RevealerWindow): - def open_window(self, window): - window_manager.close_window("QuickCenter") - window_manager.open_window(window) - - def __init__(self): - notification_center = NotificationCenter() - quick_sliders = QuickSliders() - bottom_controls = widgets.Box( - css_classes=["bottom-controls"], - hexpand=True, - halign="fill", - homogeneous=False, - spacing=5, - child=[ - Button.button( - icon="power_settings_new", - halign="start", - hexpand=False, - on_click=lambda x: self.open_window("PowerMenu"), - vexpand=False, - valign="center", - size="xs", - ), - Button.button( - icon="settings", - halign="start", - hexpand=False, - on_click=lambda x: self.open_window("Settings"), - vexpand=False, - valign="center", - size="xs", - ), - Button.button( - icon="clear_all", - label="Clear all", - halign="end", - hexpand=True, - on_click=lambda x: notifications.clear_all(), - css_classes=["notification-clear-all"], - vexpand=True, - valign="center", - size="xs", - visible=notifications.bind( - "notifications", lambda value: len(value) != 0 - ), - ), - ], - ) - - self.content_box = widgets.Box( - vertical=True, - spacing=0, - hexpand=False, - css_classes=["quick-center"], - child=[notification_center, quick_sliders, bottom_controls], - ) - self.content_box.width_request = 400 - - revealer = widgets.Revealer( - child=self.content_box, - transition_duration=300, - ) - - close_button = widgets.Button( - vexpand=True, - hexpand=True, - can_focus=False, - on_click=lambda x: window_manager.close_window("QuickCenter"), - ) - - main_overlay = widgets.Overlay( - 
css_classes=["popup-close"], - child=close_button, - overlays=[revealer], - ) - - super().__init__( - revealer=revealer, - child=main_overlay, - css_classes=["popup-close"], - hide_on_close=True, - visible=False, - namespace="QuickCenter", - popup=True, - layer="overlay", - kb_mode="exclusive", - anchor=["left", "right", "top", "bottom"], - ) - - self.window_manager = window_manager - self.notification_center = notification_center - self.revealer = revealer - self.actual_content_box = revealer - - self.niri = NiriService.get_default() - self.connect("notify::visible", self._toggle_revealer) - self.update_side() - - def _toggle_revealer(self, *_): - self.revealer.reveal_child = self.visible - - def update_side(self): - position = user_settings.interface.modules - location = position.location.systeminfotray - bar = ( - user_settings.interface.bar - if position.bar_id.systeminfotray == 0 - else user_settings.interface.bar - ) - side = bar.side - if side in ["left", "right"]: - self.actual_content_box.set_halign("start" if side == "left" else "end") - self.actual_content_box.anchor = ["top", "bottom", side] - else: - if location == "center": - self.actual_content_box.set_halign("center") - self.actual_content_box.anchor = ["top", "bottom"] - else: - self.actual_content_box.set_halign("start" if location == 0 else "end") - self.actual_content_box.anchor = [ - "top", - "bottom", - "left" if location == 0 else "end", - ] - - self.revealer.transition_type = "none" - if self.niri and self.niri.is_available: - self.revealer.transition_type = ( - "slide_right" - if self.actual_content_box.halign == "start" - else "slide_left" - ) - self.content_box.set_halign( - "end" if self.actual_content_box.halign == "start" else "end" - ) - - self.actual_content_box.queue_resize() diff --git a/inspiration/quickcenter/widgets/__init__.py b/inspiration/quickcenter/widgets/__init__.py deleted file mode 100644 index 29eb6b0..0000000 --- a/inspiration/quickcenter/widgets/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from .notificationcenter import NotificationCenter -from .sliders import QuickSliders - -__all__ = [ - "NotificationCenter", - "QuickSliders" -] diff --git a/inspiration/quickcenter/widgets/notificationcenter.py b/inspiration/quickcenter/widgets/notificationcenter.py deleted file mode 100644 index 751cc17..0000000 --- a/inspiration/quickcenter/widgets/notificationcenter.py +++ /dev/null @@ -1,98 +0,0 @@ -from ignis import widgets, utils -from ignis.services.notifications import Notification, NotificationService -from ignis.window_manager import WindowManager -from gi.repository import GLib, Gtk -from ...notifications import ExoNotification - -notifications = NotificationService.get_default() -window_manager = WindowManager.get_default() - -class Popup(widgets.Revealer): - def __init__(self, notification: Notification, **kwargs): - widget = ExoNotification(notification) - super().__init__(child=widget, transition_type="slide_down", **kwargs) - - notification.connect("closed", lambda x: self.destroy()) - - def destroy(self): - self.reveal_child = False - utils.Timeout(self.transition_duration, self.unparent) - - -class Notifications(widgets.Box): - def __init__(self): - loading_notifications_label = widgets.Label( - label="Loading notifications...", - valign="center", - vexpand=True, - css_classes=["notification-center-info-label"], - ) - - super().__init__( - vertical=True, - child=[loading_notifications_label], - vexpand=True, - css_classes=["notification-center-content"], - spacing=2, - setup=lambda self: 
notifications.connect( - "notified", - lambda x, notification: self.__on_notified(notification), - ), - ) - - utils.ThreadTask( - self.__load_notifications, - lambda result: self.set_child(result), - ).run() - - def __on_notified(self, notification: Notification) -> None: - notify = Popup(notification) - self.prepend(notify) - notify.reveal_child = True - - def __load_notifications(self) -> list[widgets.Label | Popup]: - contents: list[widgets.Label | Popup] = [] - for i in reversed(notifications.notifications): - GLib.idle_add(lambda i=i: contents.append(Popup(i, reveal_child=True))) - - contents.append( - widgets.Label( - label="notifications_off", - valign="end", - vexpand=True, - css_classes=["notification-center-info-icon"], - visible=notifications.bind( - "notifications", lambda value: len(value) == 0 - ), - ) - ) - contents.append( - widgets.Label( - label="No notifications", - valign="start", - vexpand=True, - css_classes=["notification-center-info-label"], - visible=notifications.bind( - "notifications", lambda value: len(value) == 0 - ), - ) - ) - return contents - - -class NotificationCenter(widgets.Box): - __gtype_name__ = "NotificationCenter" - - def __init__(self): - scroll = widgets.Scroll(child=Notifications(), vexpand=True) - scroll.set_overflow(Gtk.Overflow.HIDDEN) - - super().__init__( - vertical=True, - vexpand=True, - css_classes=["notification-center"], - spacing=10, - child=[ - scroll, - ], - ) diff --git a/inspiration/quickcenter/widgets/sliders.py b/inspiration/quickcenter/widgets/sliders.py deleted file mode 100644 index 785519a..0000000 --- a/inspiration/quickcenter/widgets/sliders.py +++ /dev/null @@ -1,94 +0,0 @@ -import asyncio -from gi.repository import GLib -from ignis import widgets -from ignis.services.audio import AudioService -from ignis.services.backlight import BacklightService -from ignis.window_manager import WindowManager - -audio = AudioService.get_default() -backlight = BacklightService.get_default() -window_manager = WindowManager.get_default() - - -class QuickSliders(widgets.Box): - def __init__(self): - children = [] - if audio.speaker: - self.volume_slider = widgets.Scale( - min=0, - max=100, - step=1.0, - on_change=self.on_volume_changed, - hexpand=True, - ) - volume_box = widgets.Box( - css_classes=["m3-slider"], - child=[ - widgets.Label(label="volume_up", css_classes=["m3-icon"]), - self.volume_slider, - ], - spacing=12, - ) - children.append(volume_box) - - if backlight.available: - self.backlight_slider = widgets.Scale( - min=0, - max=100, - step=1.0, - on_change=self.on_backlight_changed, - hexpand=True, - ) - backlight_box = widgets.Box( - css_classes=["m3-slider"], - child=[ - widgets.Label(label="brightness_6", css_classes=["m3-icon"]), - self.backlight_slider, - ], - spacing=12, - ) - children.append(backlight_box) - - super().__init__( - css_classes=["quick-sliders-container"], - hexpand=True, - halign="fill", - spacing=2, - vertical=True, - child=children, - ) - - if audio.speaker: - audio.speaker.connect("notify::volume", self._on_volume_changed) - audio.speaker.connect("notify::is-muted", self._on_volume_changed) - if backlight.available: - backlight.connect("notify::brightness", self._on_brightness_changed) - - def _on_volume_changed(self, stream, *_): - if stream.is_muted: - self.volume_slider.set_value(0) - else: - self.volume_slider.set_value(stream.volume) - - def _on_brightness_changed(self, backlight, *_): - self.backlight_slider.set_value( - (backlight.brightness / backlight.max_brightness) * 100 - ) - - def 
on_volume_changed(self, slider): - value = slider.get_value() - self.set_suppress_osd_flag() - audio.speaker.volume = value - - def on_backlight_changed(self, slider): - value = slider.get_value() - self.set_suppress_osd_flag() - backlight.brightness = int((value / 100) * backlight.max_brightness) - - def set_suppress_osd_flag(self): - window_manager.suppress_osd = True - asyncio.create_task(self.reset_suppress_osd_flag()) - - async def reset_suppress_osd_flag(self): - await asyncio.sleep(0.1) - window_manager.suppress_osd = False diff --git a/main.py b/main.py deleted file mode 100644 index fc1f1ae..0000000 --- a/main.py +++ /dev/null @@ -1,78 +0,0 @@ -"""Entry point for the AI sidebar application.""" - -from __future__ import annotations - -import os -import sys - -import gi - -gi.require_version("Gtk", "4.0") -from gi.repository import Gtk # noqa: E402 - -from sidebar_window import SidebarWindow # noqa: E402 - -HEADLESS_ENV_VAR = "AI_SIDEBAR_HEADLESS" -_STARTUP_FAILED = False - - -def main(argv: list[str] | None = None) -> int: - """Launch the GTK application and return the exit status.""" - args = argv or sys.argv - - if os.environ.get(HEADLESS_ENV_VAR) == "1": - print("Headless mode enabled; skipping GTK startup.") - return 0 - - if not Gtk.init_check(): - print( - "Failed to initialize GTK. Ensure a display server is available.", - file=sys.stderr, - ) - return 1 - - if not (os.environ.get("WAYLAND_DISPLAY") or os.environ.get("DISPLAY")): - print( - "No Wayland or X11 display detected. Launch this app inside a graphical session.", - file=sys.stderr, - ) - return 1 - - # Warn if GDK_BACKEND is set to X11 on Wayland systems - if os.environ.get("WAYLAND_DISPLAY") and os.environ.get("GDK_BACKEND") == "x11": - print( - "Warning: GDK_BACKEND is set to 'x11' but you're on Wayland.", - file=sys.stderr, - ) - print( - "GTK4 Layer Shell requires native Wayland. Use './run.sh' instead.", - file=sys.stderr, - ) - - app = Gtk.Application(application_id="ai.sidebar") - app.connect("activate", _on_activate) - status = app.run(args) - return 1 if _STARTUP_FAILED else status - - -def _on_activate(app: Gtk.Application) -> None: - """Create and present the main sidebar window when the app activates.""" - try: - window = SidebarWindow(application=app) - except RuntimeError as exc: - if "Gtk couldn't be initialized" in str(exc): - print( - "Failed to initialize GTK. 
Ensure a display server is available.", - file=sys.stderr, - ) - global _STARTUP_FAILED - _STARTUP_FAILED = True - app.quit() - return - raise - - window.present() - - -if __name__ == "__main__": - raise SystemExit(main()) diff --git a/message_widget.py b/message_widget.py deleted file mode 100644 index 4ae6372..0000000 --- a/message_widget.py +++ /dev/null @@ -1,8 +0,0 @@ -"""UI widgets for chat messages.""" - - -class MessageWidget: - """Placeholder message widget to be filled in with GTK constructs.""" - - def __init__(self) -> None: - raise NotImplementedError("Message widget not implemented yet.") diff --git a/ollama_client.py b/ollama_client.py index e057418..a796c26 100644 --- a/ollama_client.py +++ b/ollama_client.py @@ -1,13 +1,11 @@ -"""Client utilities for interacting with the Ollama API.""" +"""Client utilities for interacting with the Ollama API via direct HTTP calls.""" from __future__ import annotations +import json from typing import Any, Dict, Iterable, Iterator - -try: # pragma: no cover - optional dependency may not be installed in CI - import ollama -except ImportError: # pragma: no cover - fallback path for environments without Ollama - ollama = None # type: ignore[assignment] +from urllib.request import Request, urlopen +from urllib.error import URLError, HTTPError class OllamaClientError(RuntimeError): @@ -15,88 +13,60 @@ class OllamaClientError(RuntimeError): class OllamaUnavailableError(OllamaClientError): - """Raised when the Ollama Python SDK is not available.""" + """Raised when the Ollama server is not available.""" class OllamaClient: - """Thin wrapper around the Ollama Python SDK with graceful degradation.""" + """HTTP client for interacting with Ollama's REST API.""" def __init__(self, host: str | None = None) -> None: - self._host = host - self._client = None + self._host = host or "http://localhost:11434" self._cached_models: list[str] | None = None self._is_available = False - - if ollama is None: - return - - # Try to initialize client and check connection - try: - if host and hasattr(ollama, "Client"): - self._client = ollama.Client(host=host) # type: ignore[call-arg] - - # Test connection by attempting to list models - self._check_connection() - except Exception: - # Silently fail - availability flag remains False - pass + + # Check connection during initialization without raising exceptions + self._check_connection() # ------------------------------------------------------------------ helpers def _check_connection(self) -> None: - """Check if Ollama is available and update internal flag.""" - if ollama is None: - self._is_available = False - return - + """Check if Ollama server is reachable and update internal flag.""" try: - # Attempt a simple list call to verify connection - self._call_sdk("list") # type: ignore[arg-type] - self._is_available = True - except Exception: + req = Request(f"{self._host}/api/tags", method="GET") + with urlopen(req, timeout=2) as response: + self._is_available = response.status == 200 + except (URLError, HTTPError, TimeoutError, Exception): self._is_available = False @property def is_available(self) -> bool: + """Check if Ollama server is reachable.""" return self._is_available @property def default_model(self) -> str | None: + """Get the first available model.""" models = self.list_models() return models[0] if models else None def list_models(self, force_refresh: bool = False) -> list[str]: """Return the available model names, caching the result for quick reuse.""" - if not self.is_available: - return [] - if 
self._cached_models is not None and not force_refresh: return list(self._cached_models) try: - response = self._call_sdk("list") # type: ignore[arg-type] - # Update availability flag on successful call - self._is_available = True - except OllamaClientError: - self._is_available = False - return [] - except Exception: + req = Request(f"{self._host}/api/tags", method="GET") + with urlopen(req, timeout=5) as response: + data = json.loads(response.read().decode()) + # Update availability flag on successful call + self._is_available = True + except (URLError, HTTPError, TimeoutError, Exception): + # Return empty list instead of raising on connection failure self._is_available = False return [] models: list[str] = [] - # Handle both dict responses (old SDK) and Pydantic objects (new SDK) - if isinstance(response, dict): - model_list = response.get("models", []) - else: - # Pydantic object - model_list = getattr(response, "models", []) - - for item in model_list: - if isinstance(item, dict): - name = item.get("name") or item.get("model") - else: - # Pydantic object - name = getattr(item, "name", None) or getattr(item, "model", None) + for item in data.get("models", []): + name = item.get("name") or item.get("model") if name: models.append(name) @@ -109,98 +79,145 @@ class OllamaClient: *, model: str, messages: Iterable[Dict[str, str]], + options: Dict[str, Any] | None = None, ) -> dict[str, str] | None: """Execute a blocking chat call against Ollama.""" - if not self.is_available: - if ollama is None: - return { - "role": "assistant", - "content": "Ollama SDK is not installed; install `ollama` to enable responses.", - } - else: - return { - "role": "assistant", - "content": "Ollama is not running. Start Ollama with: ollama serve", - } + # Return error message instead of raising when unavailable + if not self._is_available: + return { + "role": "assistant", + "content": "Ollama is not running. 
Start Ollama with: ollama serve", + } + + payload = { + "model": model, + "messages": list(messages), + "stream": False, + } + + # Add options if provided + if options: + payload["options"] = options try: - result = self._call_sdk( - "chat", - model=model, - messages=list(messages), - stream=False, + req = Request( + f"{self._host}/api/chat", + data=json.dumps(payload).encode("utf-8"), + headers={"Content-Type": "application/json"}, + method="POST", ) - # Update availability flag on successful call - self._is_available = True - except OllamaClientError as exc: - self._is_available = False - return { - "role": "assistant", - "content": f"Unable to reach Ollama: {exc}\n\nStart Ollama with: ollama serve", - } - except Exception as exc: + with urlopen(req, timeout=120) as response: + result = json.loads(response.read().decode()) + # Update availability flag on successful call + self._is_available = True + except (URLError, HTTPError, TimeoutError, Exception) as exc: + # Update availability flag and return error message self._is_available = False return { "role": "assistant", "content": f"Unable to reach Ollama: {exc}\n\nStart Ollama with: ollama serve", } - # Handle both dict responses (old SDK) and Pydantic objects (new SDK) - if isinstance(result, dict): - message = result.get("message") - if not message: - return {"role": "assistant", "content": ""} - role = message.get("role") or "assistant" - content = message.get("content") or "" - else: - # Pydantic object (ollama SDK >= 0.4.0) - message = getattr(result, "message", None) - if not message: - return {"role": "assistant", "content": ""} - role = getattr(message, "role", "assistant") - content = getattr(message, "content", "") + # Parse the response + message = result.get("message") + if not message: + return {"role": "assistant", "content": ""} + + role = message.get("role", "assistant") + content = message.get("content", "") return {"role": role, "content": content} def stream_chat( - self, *, model: str, messages: Iterable[Dict[str, str]] + self, *, model: str, messages: Iterable[Dict[str, str]], options: Dict[str, Any] | None = None ) -> Iterator[dict[str, Any]]: - """Placeholder that exposes the streaming API for future UI hooks.""" - if not self.is_available: - raise OllamaUnavailableError( - "Streaming requires the Ollama Python SDK to be installed." - ) + """Execute a streaming chat call against Ollama. + + Yields dictionaries containing token data from the streaming response. + Each yielded dict may contain 'message' with 'content' field for tokens. + """ + # Return error message instead of raising when unavailable + if not self._is_available: + yield { + "role": "assistant", + "content": "Ollama is not running. 
Start Ollama with: ollama serve", + "done": True, + } + return + + payload = { + "model": model, + "messages": list(messages), + "stream": True, + "think": True, # Enable thinking output for thinking models + } + + # Add options if provided + if options: + payload["options"] = options + + try: - stream = self._call_sdk( - "chat", - model=model, - messages=list(messages), - stream=True, + req = Request( + f"{self._host}/api/chat", + data=json.dumps(payload).encode("utf-8"), + headers={"Content-Type": "application/json"}, + method="POST", ) - except OllamaClientError as exc: - raise OllamaClientError(f"Failed to start streaming chat: {exc}") from exc + + with urlopen(req, timeout=120) as response: + # Update availability flag on successful connection + self._is_available = True + + # Read streaming response line by line + for line in response: + if not line: + continue + + try: + chunk = json.loads(line.decode("utf-8")) + - if not hasattr(stream, "__iter__"): - raise OllamaClientError("Ollama returned a non-iterable stream response.") - return iter(stream) + + yield chunk + + # Check if streaming is complete + if chunk.get("done", False): + break + except json.JSONDecodeError: + # Skip malformed JSON lines + continue + + except (URLError, HTTPError, TimeoutError, Exception) as exc: + # Update availability flag and yield error message + self._is_available = False + yield { + "role": "assistant", + "content": f"Unable to reach Ollama: {exc}\n\nStart Ollama with: ollama serve", + "done": True, + "error": True, + } # ------------------------------------------------------------------ internals - def _call_sdk(self, method: str, *args: Any, **kwargs: Any) -> Any: - if not self.is_available: - raise OllamaUnavailableError( - "Ollama Python SDK is not available in the environment." - ) + def _make_request( + self, endpoint: str, method: str = "GET", data: dict | None = None + ) -> dict: + """Make an HTTP request to the Ollama API.""" + url = f"{self._host}{endpoint}" - target = self._client if self._client is not None else ollama - if target is None or not hasattr(target, method): - raise OllamaClientError( - f"Ollama SDK does not expose method '{method}'. Install or update the SDK." + if data: + req = Request( + url, + data=json.dumps(data).encode("utf-8"), + headers={"Content-Type": "application/json"}, + method=method, ) - - func = getattr(target, method) + else: + req = Request(url, method=method) try: - return func(*args, **kwargs) - except Exception as exc: # pragma: no cover - network errors depend on runtime - raise OllamaClientError(str(exc)) from exc + with urlopen(req, timeout=30) as response: + return json.loads(response.read().decode()) + except (URLError, HTTPError) as exc: + raise OllamaClientError(f"Request failed: {exc}") from exc diff --git a/ollama_monitor.py b/ollama_monitor.py new file mode 100644 index 0000000..5146dfe --- /dev/null +++ b/ollama_monitor.py @@ -0,0 +1,103 @@ +"""Monitor for tracking Ollama availability and notifying on state changes.""" + +from __future__ import annotations + +from typing import Callable +from gi.repository import GLib +from .ollama_client import OllamaClient + + +class OllamaAvailabilityMonitor: + """Monitors Ollama availability and notifies callbacks on state changes.""" + + def __init__(self, client: OllamaClient, check_interval_seconds: int = 30) -> None: + """ + Initialize the availability monitor. 
+ + Args: + client: OllamaClient instance to monitor + check_interval_seconds: How often to check availability (default: 30s) + """ + self._client = client + self._check_interval_ms = check_interval_seconds * 1000 + self._callbacks: list[Callable[[bool], None]] = [] + self._last_state: bool | None = None + self._timeout_id: int | None = None + self._is_running = False + + def add_callback(self, callback: Callable[[bool], None]) -> None: + """ + Add a callback to be notified when availability changes. + + Args: + callback: Function that takes a bool (True if available, False if not) + """ + if callback not in self._callbacks: + self._callbacks.append(callback) + + def remove_callback(self, callback: Callable[[bool], None]) -> None: + """Remove a previously registered callback.""" + if callback in self._callbacks: + self._callbacks.remove(callback) + + def start(self) -> None: + """Start monitoring Ollama availability.""" + if self._is_running: + return + + self._is_running = True + # Do an immediate check + self._check_availability() + # Schedule periodic checks + self._timeout_id = GLib.timeout_add( + self._check_interval_ms, + self._check_availability + ) + + def stop(self) -> None: + """Stop monitoring Ollama availability.""" + if not self._is_running: + return + + self._is_running = False + if self._timeout_id is not None: + GLib.source_remove(self._timeout_id) + self._timeout_id = None + + def _check_availability(self) -> bool: + """ + Check current availability and notify callbacks if state changed. + + Returns: + True to continue periodic checks, False to stop + """ + # Force a connection check + self._client._check_connection() + current_state = self._client.is_available + + # Only notify if state changed + if self._last_state is None or self._last_state != current_state: + self._last_state = current_state + self._notify_callbacks(current_state) + + # Return True to continue periodic checks + return True + + def _notify_callbacks(self, is_available: bool) -> None: + """Notify all registered callbacks of the availability state.""" + for callback in self._callbacks: + try: + callback(is_available) + except Exception as e: + # Log error but don't let one callback failure affect others + print(f"Error in availability callback: {e}") + + @property + def is_running(self) -> bool: + """Check if the monitor is currently running.""" + return self._is_running + + @property + def current_state(self) -> bool | None: + """Get the last known availability state (None if never checked).""" + return self._last_state diff --git a/pyproject.toml b/pyproject.toml deleted file mode 100644 index ef92a67..0000000 --- a/pyproject.toml +++ /dev/null @@ -1,16 +0,0 @@ -[tool.black] -line-length = 88 -target-version = ["py313"] - -[tool.ruff] -line-length = 88 -target-version = "py313" - -[tool.ruff.lint] -select = ["E", "F", "I"] -ignore = [] - -[tool.pytest.ini_options] -minversion = "8.0" -addopts = "-ra" -testpaths = ["tests"] diff --git a/reasoning_controller.py b/reasoning_controller.py new file mode 100644 index 0000000..f9eb55d --- /dev/null +++ b/reasoning_controller.py @@ -0,0 +1,127 @@ +"""Reasoning mode controller for managing AI reasoning preferences.""" + +from __future__ import annotations + +import json +import os +from dataclasses import dataclass, asdict +from pathlib import Path +from typing import Dict, Any + + +@dataclass +class PreferencesState: + """User preferences for sidebar behavior.""" + reasoning_enabled: bool = False + default_model: str | None = None + theme: str = "default" + + 
+class ReasoningController: + """Manages reasoning mode state and model selection.""" + + # Model names for reasoning toggle + INSTRUCT_MODEL = "hf.co/unsloth/Qwen3-4B-Instruct-2507-GGUF:Q8_K_XL" + THINKING_MODEL = "hf.co/unsloth/Qwen3-4B-Thinking-2507-GGUF:Q8_K_XL" + + def __init__(self): + """Initialize the reasoning controller with preference persistence.""" + # Set preference file path + config_dir = Path.home() / ".config" / "aisidebar" + config_dir.mkdir(parents=True, exist_ok=True) + self._preference_file = config_dir / "preferences.json" + + # Load preferences from disk + self._preferences = self._load_preferences() + + def is_enabled(self) -> bool: + """Check if reasoning mode is active. + + Returns: + True if reasoning mode is enabled, False otherwise + """ + return self._preferences.reasoning_enabled + + def toggle(self) -> bool: + """Toggle reasoning mode and persist preference. + + Returns: + New state of reasoning mode (True if enabled, False if disabled) + """ + self._preferences.reasoning_enabled = not self._preferences.reasoning_enabled + self._save_preferences() + return self._preferences.reasoning_enabled + + def get_model_name(self) -> str: + """Return the appropriate model name based on reasoning mode. + + Returns: + THINKING_MODEL if reasoning is enabled, INSTRUCT_MODEL otherwise + """ + return self.THINKING_MODEL if self._preferences.reasoning_enabled else self.INSTRUCT_MODEL + + def get_model_options(self) -> Dict[str, Any]: + """Return the optimal parameters for the current model. + + Returns: + Dictionary of model-specific parameters + """ + if self._preferences.reasoning_enabled: + # Thinking model settings + return { + "temperature": 0.6, + "top_p": 0.95, + "top_k": 20, + "min_p": 0.0, + "num_predict": 32768, # Adequate output length + } + else: + # Instruct model settings + return { + "temperature": 0.7, + "top_p": 0.8, + "top_k": 20, + "min_p": 0.0, + "num_predict": 32768, + } + + def _load_preferences(self) -> PreferencesState: + """Load preferences from disk or create defaults. 
+ + Returns: + PreferencesState instance with loaded or default values + """ + if not self._preference_file.exists(): + return PreferencesState() + + try: + with self._preference_file.open("r", encoding="utf-8") as f: + data = json.load(f) + return PreferencesState( + reasoning_enabled=data.get("reasoning_enabled", False), + default_model=data.get("default_model"), + theme=data.get("theme", "default"), + ) + except (json.JSONDecodeError, OSError): + # If file is corrupted or unreadable, return defaults + return PreferencesState() + + def _save_preferences(self) -> None: + """Persist preferences to disk atomically.""" + try: + # Convert dataclass to dict + data = asdict(self._preferences) + + # Write to temporary file first + temp_file = self._preference_file.with_suffix(".tmp") + with temp_file.open("w", encoding="utf-8") as f: + json.dump(data, f, indent=2, ensure_ascii=False) + f.flush() + os.fsync(f.fileno()) + + # Atomic replace + os.replace(temp_file, self._preference_file) + except OSError: + # If save fails, continue without crashing + # Preferences will revert to previous state on next load + pass diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 1a98163..0000000 --- a/requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -PyGObject -ollama -pytest -ruff -black diff --git a/run.sh b/run.sh deleted file mode 100755 index 27e4a6d..0000000 --- a/run.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -# Launcher script for Niri AI Sidebar with proper Wayland configuration - -# Get the directory where this script is located -SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" - -# Activate virtual environment if it exists -if [ -f "$SCRIPT_DIR/.venv/bin/activate" ]; then - source "$SCRIPT_DIR/.venv/bin/activate" -fi - -# Force GTK to use native Wayland backend (not XWayland) -export GDK_BACKEND=wayland - -# Preload GTK4 Layer Shell library to ensure proper initialization -export LD_PRELOAD=/usr/lib/libgtk4-layer-shell.so - -# Run the application -exec python3 "$SCRIPT_DIR/main.py" "$@" diff --git a/sidebar-plan.md b/sidebar-plan.md deleted file mode 100644 index 73ec0a2..0000000 --- a/sidebar-plan.md +++ /dev/null @@ -1,640 +0,0 @@ - - -# AI Chat Sidebar Development Plan - Direct GTK4 Approach - -Based on your comprehensive feasibility assessment and the gathered documentation, here's a production-ready development plan using **direct GTK4 + Python** instead of Ignis. - -*** - -## Documentation Resources - -### Core Documentation - -1. **gtk4-layer-shell**: https://github.com/wmww/gtk4-layer-shell - - Python examples in `examples/` directory - - API documentation for layer positioning - - Installation: `pacman -S gtk4-layer-shell` (Arch) or build from source -2. **PyGObject Threading Guide**: https://pygobject.gnome.org/guide/threading.html - - Essential patterns for `GLib.idle_add()` usage - - Thread-safety guidelines for GTK operations -3. **Ollama Python Library**: https://github.com/ollama/ollama-python - - Installation: `pip install ollama` - - Streaming chat API with `stream=True` parameter -4. **Niri Configuration**: https://github.com/YaLTeR/niri/wiki/Configuration:-Introduction - - KDL syntax guide - - Layer-rules documentation - - Live reload capabilities -5. 
**Alpaca Reference Implementation**: https://github.com/Jeffser/Alpaca - - Production GTK4 + Ollama patterns - - Threading implementation examples - - UI/UX patterns for chat interfaces - -*** - -## Project Structure - -``` -ai-sidebar/ -β”œβ”€β”€ main.py # Entry point, Gtk.Application setup -β”œβ”€β”€ sidebar_window.py # Layer-shell window with GTK4 -β”œβ”€β”€ ollama_client.py # Ollama API wrapper with threading -β”œβ”€β”€ message_widget.py # Individual message bubble -β”œβ”€β”€ conversation_manager.py # State management, persistence -β”œβ”€β”€ styles.css # GTK CSS styling -β”œβ”€β”€ config.py # User settings (model, API endpoint) -└── data/ - └── conversations/ # XDG_DATA_HOME/ai-sidebar/ - β”œβ”€β”€ index.json # Session index - └── {uuid}.json # Individual conversations -``` - - -*** - -## Development Phases - -### **Phase 1: Minimal Proof-of-Concept (Days 1-2)** - -**Objective**: Validate gtk4-layer-shell works with Niri and basic Ollama connectivity - -#### Tasks: - -1. **Install dependencies**: -```bash -# Arch Linux -sudo pacman -S gtk4 gtk4-layer-shell python-gobject python-pip -pip install ollama - -# Clone examples -git clone https://github.com/wmww/gtk4-layer-shell -cd gtk4-layer-shell/examples -# Study Python examples -``` - -2. **Create minimal sidebar** (`minimal_poc.py`): -```python -#!/usr/bin/env python3 -import gi -gi.require_version('Gtk', '4.0') -gi.require_version('Gtk4LayerShell', '1.0') -from gi.repository import Gtk, Gtk4LayerShell, GLib -import ollama -import threading - -class MinimalSidebar(Gtk.ApplicationWindow): - def __init__(self, app): - super().__init__(application=app, title="AI Sidebar") - - # Initialize layer shell - Gtk4LayerShell.init_for_window(self) - Gtk4LayerShell.set_namespace(self, "ai-sidebar") - Gtk4LayerShell.set_layer(self, Gtk4LayerShell.Layer.TOP) - - # Anchor to left edge, full height - Gtk4LayerShell.set_anchor(self, Gtk4LayerShell.Edge.LEFT, True) - Gtk4LayerShell.set_anchor(self, Gtk4LayerShell.Edge.TOP, True) - Gtk4LayerShell.set_anchor(self, Gtk4LayerShell.Edge.BOTTOM, True) - Gtk4LayerShell.set_margin(self, Gtk4LayerShell.Edge.LEFT, 0) - - # Request keyboard input - Gtk4LayerShell.set_keyboard_mode(self, - Gtk4LayerShell.KeyboardMode.ON_DEMAND) - - # Set width - self.set_default_size(350, -1) - - # Build UI - self.setup_ui() - - def setup_ui(self): - main_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=0) - - # Chat display area - self.textview = Gtk.TextView() - self.textview.set_editable(False) - self.textview.set_wrap_mode(Gtk.WrapMode.WORD) - self.textview.set_margin_start(10) - self.textview.set_margin_end(10) - - scroll = Gtk.ScrolledWindow() - scroll.set_child(self.textview) - scroll.set_vexpand(True) - scroll.set_hexpand(True) - - # Input area - input_box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=5) - input_box.set_margin_start(10) - input_box.set_margin_end(10) - input_box.set_margin_top(5) - input_box.set_margin_bottom(10) - - self.entry = Gtk.Entry() - self.entry.set_hexpand(True) - self.entry.set_placeholder_text("Ask something...") - self.entry.connect('activate', self.on_send) - - send_btn = Gtk.Button(label="Send") - send_btn.connect('clicked', self.on_send) - - input_box.append(self.entry) - input_box.append(send_btn) - - main_box.append(scroll) - main_box.append(input_box) - - self.set_child(main_box) - - def append_text(self, text): - """Thread-safe text append""" - buffer = self.textview.get_buffer() - end_iter = buffer.get_end_iter() - buffer.insert(end_iter, text, -1) - - # Auto-scroll 
to bottom - mark = buffer.create_mark(None, end_iter, False) - self.textview.scroll_to_mark(mark, 0.0, True, 0.0, 1.0) - - def on_send(self, widget): - prompt = self.entry.get_text().strip() - if not prompt: - return - - self.entry.set_text("") - GLib.idle_add(self.append_text, f"\n[You] {prompt}\n") - - def worker(): - try: - GLib.idle_add(self.append_text, "[AI] ") - - # Stream response from Ollama - stream = ollama.chat( - model='llama3.2', # Use installed model - messages=[{'role': 'user', 'content': prompt}], - stream=True - ) - - for chunk in stream: - content = chunk['message']['content'] - GLib.idle_add(self.append_text, content) - - GLib.idle_add(self.append_text, "\n") - - except Exception as e: - GLib.idle_add(self.append_text, f"\n[Error] {str(e)}\n") - - thread = threading.Thread(target=worker, daemon=True) - thread.start() - -class App(Gtk.Application): - def do_activate(self): - window = MinimalSidebar(self) - window.present() - -if __name__ == '__main__': - app = App() - app.run(None) -``` - -3. **Test basic functionality**: -```bash -# Ensure Ollama is running with a model -ollama pull llama3.2 - -# Run the sidebar -python minimal_poc.py -``` - -4. **Add Niri configuration** (`~/.config/niri/config.kdl`): -```kdl -layer-rule { - match namespace="^ai-sidebar$" - - // Optional: Add shadow - shadow { - on - softness 40 - spread 5 - offset x=0 y=5 - } -} - -binds { - // Toggle sidebar with Super+A - Mod+A { spawn "python" "/path/to/minimal_poc.py"; } -} -``` - -**AI Coding Tool Prompt for Phase 1**: - -``` -Create a minimal GTK4 application using gtk4-layer-shell that: -1. Initializes layer-shell with namespace "ai-sidebar" -2. Anchors to left edge, full height, 350px width -3. Uses Gtk4LayerShell.Layer.TOP for z-order -4. Contains a TextView (read-only) and Entry widget -5. Connects Entry's 'activate' signal to send messages -6. Uses threading.Thread + GLib.idle_add for Ollama streaming -7. Calls ollama.chat() with stream=True in background thread -8. Appends each chunk to TextView via GLib.idle_add - -Requirements: -- Python 3.11+, GTK4, gtk4-layer-shell, ollama-python -- Thread-safe UI updates only via GLib.idle_add -- Basic error handling for connection failures -- Auto-scroll TextView to bottom after each message - -Reference PyGObject threading guide patterns. -``` - - -*** - -### **Phase 2: Production Features (Days 3-5)** - -**Objective**: Add model selection, conversation persistence, better UI - -#### Components: - -1. 
**Ollama Client** (`ollama_client.py`): -```python -import ollama -import threading -from typing import Callable, Optional, List, Dict -from gi.repository import GLib - -class OllamaClient: - def __init__(self): - self.base_url = "http://localhost:11434" - self.current_model = "llama3.2" - self._cancel_event = threading.Event() - - def get_models(self) -> List[str]: - """Get list of installed models""" - try: - models = ollama.list() - return [m['name'] for m in models['models']] - except Exception as e: - print(f"Error fetching models: {e}") - return [] - - def stream_chat( - self, - messages: List[Dict[str, str]], - on_chunk: Callable[[str], None], - on_complete: Callable[[], None], - on_error: Callable[[str], None] - ): - """Stream chat response in background thread""" - self._cancel_event.clear() - - def worker(): - try: - stream = ollama.chat( - model=self.current_model, - messages=messages, - stream=True - ) - - for chunk in stream: - if self._cancel_event.is_set(): - break - - content = chunk['message']['content'] - GLib.idle_add(on_chunk, content) - - if not self._cancel_event.is_set(): - GLib.idle_add(on_complete) - - except Exception as e: - GLib.idle_add(on_error, str(e)) - - thread = threading.Thread(target=worker, daemon=True) - thread.start() - - def cancel(self): - """Cancel current streaming operation""" - self._cancel_event.set() -``` - -2. **Conversation Manager** (`conversation_manager.py`): -```python -import json -import uuid -from pathlib import Path -from typing import List, Dict, Optional -from datetime import datetime - -class ConversationManager: - def __init__(self): - # XDG data directory - self.data_dir = Path.home() / ".local/share/ai-sidebar/conversations" - self.data_dir.mkdir(parents=True, exist_ok=True) - - self.index_file = self.data_dir / "index.json" - self.current_session_id: Optional[str] = None - self.messages: List[Dict[str, str]] = [] - - def new_session(self) -> str: - """Create new conversation session""" - session_id = str(uuid.uuid4()) - self.current_session_id = session_id - self.messages = [] - self._update_index(session_id) - return session_id - - def add_message(self, role: str, content: str): - """Add message to current session""" - self.messages.append({ - "role": role, - "content": content, - "timestamp": datetime.now().isoformat() - }) - self._save_session() - - def load_session(self, session_id: str) -> List[Dict]: - """Load conversation from file""" - session_file = self.data_dir / f"{session_id}.json" - if session_file.exists(): - with open(session_file, 'r') as f: - data = json.load(f) - self.messages = data.get('messages', []) - self.current_session_id = session_id - return self.messages - return [] - - def _save_session(self): - """Save current session to disk""" - if not self.current_session_id: - return - - session_file = self.data_dir / f"{self.current_session_id}.json" - with open(session_file, 'w') as f: - json.dump({ - "session_id": self.current_session_id, - "created": datetime.now().isoformat(), - "messages": self.messages - }, f, indent=2) - - def _update_index(self, session_id: str): - """Update session index""" - index = [] - if self.index_file.exists(): - with open(self.index_file, 'r') as f: - index = json.load(f) - - index.append({ - "id": session_id, - "created": datetime.now().isoformat() - }) - - with open(self.index_file, 'w') as f: - json.dump(index, f, indent=2) -``` - -3. 
**Model Selector Widget**: - -Add to sidebar window: - -```python -def setup_header(self): - header = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=10) - header.set_margin_start(10) - header.set_margin_end(10) - header.set_margin_top(10) - - # Model dropdown - self.model_combo = Gtk.ComboBoxText() - self.model_combo.set_hexpand(True) - self.refresh_models() - self.model_combo.connect('changed', self.on_model_changed) - - # New chat button - new_btn = Gtk.Button(label="New") - new_btn.connect('clicked', self.on_new_chat) - - header.append(self.model_combo) - header.append(new_btn) - - return header - -def refresh_models(self): - models = self.ollama_client.get_models() - self.model_combo.remove_all() - for model in models: - self.model_combo.append_text(model) - if models: - self.model_combo.set_active(0) -``` - -**AI Coding Tool Prompt for Phase 2**: - -``` -Extend the minimal sidebar with: - -1. OllamaClient class: - - get_models() method calling ollama.list() - - stream_chat() with threading.Event for cancellation - - Callbacks: on_chunk, on_complete, on_error - - Thread-safe via GLib.idle_add - -2. ConversationManager class: - - XDG data directory: ~/.local/share/ai-sidebar/conversations/ - - new_session() creates UUID, initializes messages list - - add_message(role, content) appends and saves to JSON - - load_session(id) loads from {uuid}.json file - - Auto-save after each message with fsync - -3. UI additions: - - Header box with Gtk.ComboBoxText for model selection - - "New Chat" button to clear conversation - - Populate ComboBox from get_models() - - Update ollama_client.current_model on selection change - -Maintain thread safety and error handling patterns from Phase 1. -``` - - -*** - -### **Phase 3: Polish \& Integration (Days 6-7)** - -**Objective**: UI improvements, Niri integration, keyboard shortcuts - -#### Tasks: - -1. **CSS Styling** (`styles.css`): -```css -window { - background-color: #1e1e2e; -} - -textview { - background-color: #181825; - color: #cdd6f4; - font-family: monospace; - font-size: 12pt; -} - -entry { - background-color: #313244; - color: #cdd6f4; - border-radius: 8px; - padding: 8px; -} - -button { - background-color: #89b4fa; - color: #1e1e2e; - border-radius: 8px; - padding: 8px 16px; -} -``` - -Load in application: - -```python -css_provider = Gtk.CssProvider() -css_provider.load_from_path('styles.css') -Gtk.StyleContext.add_provider_for_display( - self.get_display(), - css_provider, - Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION -) -``` - -2. **Keyboard Shortcuts**: -```python -def setup_keybindings(self): - event_controller = Gtk.EventControllerKey() - event_controller.connect('key-pressed', self.on_key_pressed) - self.add_controller(event_controller) - -def on_key_pressed(self, controller, keyval, keycode, state): - # Escape to close - if keyval == Gdk.KEY_Escape: - self.close() - return True - - # Ctrl+N for new chat - if (state & Gdk.ModifierType.CONTROL_MASK and - keyval == Gdk.KEY_n): - self.on_new_chat(None) - return True - - return False -``` - -3. 
**Niri Toggle Script** (`toggle-sidebar.sh`): -```bash -#!/bin/bash -PID=$(pgrep -f "python.*main.py") - -if [ -z "$PID" ]; then - # Start sidebar - python /path/to/ai-sidebar/main.py & -else - # Kill sidebar - kill $PID -fi -``` - -Update Niri config: - -```kdl -binds { - Mod+A { spawn "bash" "/path/to/toggle-sidebar.sh"; } -} -``` - - -*** - -## Testing Checklist - -- [ ] Sidebar appears at left edge with correct dimensions -- [ ] Layer-shell positioning works (stays on top, doesn't block clicks outside) -- [ ] Keyboard input works in Entry widget -- [ ] Messages stream smoothly from Ollama -- [ ] Model selector populates with installed models -- [ ] Model switching changes active model -- [ ] New chat clears conversation -- [ ] Conversations persist across restarts -- [ ] Threading doesn't freeze UI -- [ ] Cancel works (if implemented) -- [ ] No memory leaks during extended use -- [ ] Compatible with Exo shell (no namespace conflicts) -- [ ] CSS styling applies correctly -- [ ] Escape key closes sidebar -- [ ] Toggle script works from Niri keybind - -*** - -## Timeline Summary - -| Phase | Duration | Deliverable | -| :-- | :-- | :-- | -| Phase 1: POC | 2 days | Working sidebar with basic chat | -| Phase 2: Features | 3 days | Model selection, persistence, better UI | -| Phase 3: Polish | 2 days | Styling, keybinds, Niri integration | -| **Total** | **7 days** | **Production-ready sidebar** | - -**Realistic estimate**: 10-14 days accounting for debugging and learning curve. - -*** - -## Key Success Factors - -1. **Start simple**: Phase 1 POC validates everything works before investing time -2. **Reference Alpaca**: Study their threading patterns and UI implementations -3. **Test incrementally**: Each feature works before moving to next -4. **Use AI tools effectively**: Break prompts into discrete components -5. **Follow PyGObject patterns**: Threading via `GLib.idle_add()` is critical - -This plan avoids Ignis instability while achieving your goal with mature, well-documented technologies.[^1][^2][^3] -[^10][^11][^12][^13][^14][^15][^16][^17][^18][^19][^20][^21][^22][^4][^5][^6][^7][^8][^9] - -
⁂
- -[^1]: https://pygobject.gnome.org/guide/threading.html - -[^2]: https://github.com/wmww/gtk4-layer-shell - -[^3]: https://github.com/Jeffser/Alpaca - -[^4]: https://stackoverflow.com/questions/73665239/implementing-threading-in-a-python-gtk-application-pygobject-to-prevent-ui-fre - -[^5]: https://gitlab.gnome.org/GNOME/pygobject/-/blob/3.49.0/docs/guide/threading.rst - -[^6]: https://discourse.gnome.org/t/gtk-threading-problem-with-glib-idle-add/13597 - -[^7]: https://gist.github.com/bossjones/e21b53c6dff04e8fdb3d - -[^8]: https://dunkelstern.de/articles/2025-01-24/index.html - -[^9]: https://www.glukhov.org/post/2025/10/ollama-python-examples/ - -[^10]: https://www.reddit.com/r/learnpython/comments/fa9612/pygtk_glade_threading/ - -[^11]: https://github.com/YaLTeR/niri/wiki/Configuration:-Introduction - -[^12]: https://github.com/ollama/ollama-python - -[^13]: https://pygtk.daa.com.narkive.com/QpH3Y5ky/idle-add-vs-threads-enter-threads-leave - -[^14]: https://github.com/YaLTeR/niri/discussions/674 - -[^15]: https://ollama.com/blog/streaming-tool - -[^16]: https://docs.gtk.org/glib/func.idle_add.html - -[^17]: https://yalter.github.io/niri/Configuration:-Window-Rules.html - -[^18]: https://www.cohorte.co/blog/using-ollama-with-python-step-by-step-guide - -[^19]: https://gnulinux.ch/ein-kleines-gtk4-programm-in-python - -[^20]: https://yalter.github.io/niri/Getting-Started.html - -[^21]: https://www.reddit.com/r/Python/comments/1ael05l/ollama_python_library_chat_method_system_message/ - -[^22]: https://git.yaroslavps.com/configs/swayrice/tree/dotfiles/.config/niri/config.kdl?id=dd00aee82134d4f1b41463c5371f1ee943a9ec7a diff --git a/sidebar_window.py b/sidebar_window.py deleted file mode 100644 index ab81425..0000000 --- a/sidebar_window.py +++ /dev/null @@ -1,223 +0,0 @@ -"""GTK sidebar window definitions.""" - -from __future__ import annotations - -import threading -from typing import Iterable - -import gi - -gi.require_version("Gtk", "4.0") -from gi.repository import GLib, Gtk # noqa: E402 - -try: # pragma: no cover - optional dependency may not be available in CI - gi.require_version("Gtk4LayerShell", "1.0") - from gi.repository import Gtk4LayerShell # type: ignore[attr-defined] -except (ImportError, ValueError): # pragma: no cover - fallback path - Gtk4LayerShell = None # type: ignore[misc] - -from conversation_manager import ConversationManager -from ollama_client import OllamaClient - - -class SidebarWindow(Gtk.ApplicationWindow): - """Layer-shell anchored window hosting the chat interface.""" - - def __init__(self, **kwargs) -> None: - super().__init__(**kwargs) - - # CRITICAL: Layer shell must be initialized BEFORE any window properties - self._setup_layer_shell() - - self.set_default_size(360, 720) - self.set_title("Niri AI Sidebar") - self.set_hide_on_close(False) - - self._conversation_manager = ConversationManager() - self._ollama_client = OllamaClient() - self._current_model = self._ollama_client.default_model - - self._build_ui() - self._populate_initial_messages() - - # ------------------------------------------------------------------ UI setup - def _setup_layer_shell(self) -> None: - """Attach the window to the left edge via gtk4-layer-shell when available.""" - if Gtk4LayerShell is None: - return - - Gtk4LayerShell.init_for_window(self) - - # Verify initialization succeeded before configuring layer shell properties - if not Gtk4LayerShell.is_layer_window(self): - return - - Gtk4LayerShell.set_namespace(self, "niri-ai-sidebar") - Gtk4LayerShell.set_layer(self, 
Gtk4LayerShell.Layer.TOP) - Gtk4LayerShell.set_anchor(self, Gtk4LayerShell.Edge.LEFT, True) - Gtk4LayerShell.set_anchor(self, Gtk4LayerShell.Edge.TOP, True) - Gtk4LayerShell.set_anchor(self, Gtk4LayerShell.Edge.BOTTOM, True) - Gtk4LayerShell.set_anchor(self, Gtk4LayerShell.Edge.RIGHT, False) - Gtk4LayerShell.set_margin(self, Gtk4LayerShell.Edge.LEFT, 0) - Gtk4LayerShell.set_keyboard_mode( - self, Gtk4LayerShell.KeyboardMode.ON_DEMAND - ) - Gtk4LayerShell.set_exclusive_zone(self, -1) - - def _build_ui(self) -> None: - """Create the core layout: message history and input entry.""" - main_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=12) - main_box.set_margin_top(16) - main_box.set_margin_bottom(16) - main_box.set_margin_start(16) - main_box.set_margin_end(16) - main_box.set_hexpand(True) - main_box.set_vexpand(True) - - header_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=4) - header_title = Gtk.Label(label="Niri AI Sidebar") - header_title.set_halign(Gtk.Align.START) - header_title.get_style_context().add_class("title-2") - - model_name = self._current_model or "No local model detected" - self._model_label = Gtk.Label(label=f"Model: {model_name}") - self._model_label.set_halign(Gtk.Align.START) - self._model_label.get_style_context().add_class("dim-label") - - header_box.append(header_title) - header_box.append(self._model_label) - - self._message_list = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=8) - self._message_list.set_hexpand(True) - self._message_list.set_vexpand(True) - self._message_list.set_valign(Gtk.Align.START) - - scroller = Gtk.ScrolledWindow() - scroller.set_hexpand(True) - scroller.set_vexpand(True) - scroller.set_child(self._message_list) - scroller.set_min_content_height(300) - self._scroller = scroller - - input_box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=8) - input_box.set_hexpand(True) - - self._entry = Gtk.Entry() - self._entry.set_hexpand(True) - self._entry.set_placeholder_text("Ask a question…") - self._entry.connect("activate", self._on_submit) - - # Add focus event controller to properly handle focus-out events - # The handler must return False to propagate the event to GTK's default handler - focus_controller = Gtk.EventControllerFocus() - focus_controller.connect("leave", lambda c: False) - self._entry.add_controller(focus_controller) - - self._send_button = Gtk.Button(label="Send") - self._send_button.connect("clicked", self._on_submit) - - input_box.append(self._entry) - input_box.append(self._send_button) - - main_box.append(header_box) - main_box.append(scroller) - main_box.append(input_box) - - self.set_child(main_box) - - def _populate_initial_messages(self) -> None: - """Render conversation history stored on disk.""" - for message in self._conversation_manager.messages: - self._append_message(message["role"], message["content"], persist=False) - - if not self._conversation_manager.messages: - self._append_message( - "assistant", - "Welcome! 
Ask a question to start a conversation.", - persist=True, - ) - - # ------------------------------------------------------------------ helpers - def _append_message( - self, role: str, content: str, *, persist: bool = True - ) -> None: - """Add a message bubble to the history and optionally persist it.""" - label_prefix = "You" if role == "user" else "Assistant" - label = Gtk.Label(label=f"{label_prefix}: {content}") - label.set_halign(Gtk.Align.START) - label.set_xalign(0.0) - label.set_wrap(True) - label.set_wrap_mode(Gtk.WrapMode.WORD_CHAR) - label.set_justify(Gtk.Justification.LEFT) - - self._message_list.append(label) - self._scroll_to_bottom() - - if persist: - self._conversation_manager.append_message(role, content) - - def _scroll_to_bottom(self) -> None: - """Ensure the most recent message is visible.""" - def _scroll() -> bool: - adjustment = self._scroller.get_vadjustment() - if adjustment is not None: - adjustment.set_value(adjustment.get_upper() - adjustment.get_page_size()) - return False - - GLib.idle_add(_scroll) - - def _set_input_enabled(self, enabled: bool) -> None: - self._entry.set_sensitive(enabled) - self._send_button.set_sensitive(enabled) - - # ------------------------------------------------------------------ callbacks - def _on_submit(self, _widget: Gtk.Widget) -> None: - """Handle send button clicks or entry activation.""" - text = self._entry.get_text().strip() - if not text: - return - - self._entry.set_text("") - self._append_message("user", text, persist=True) - self._request_response() - - def _request_response(self) -> None: - """Trigger a synchronous Ollama chat call on a worker thread.""" - model = self._current_model or self._ollama_client.default_model - if not model: - self._append_message( - "assistant", - "No Ollama models are available. Install a model to continue.", - persist=True, - ) - return - - history = self._conversation_manager.chat_messages - self._set_input_enabled(False) - - def _worker(messages: Iterable[dict[str, str]]) -> None: - response = self._ollama_client.chat(model=model, messages=list(messages)) - GLib.idle_add(self._handle_response, response, priority=GLib.PRIORITY_DEFAULT) - - thread = threading.Thread(target=_worker, args=(history,), daemon=True) - thread.start() - - def _handle_response(self, response: dict[str, str] | None) -> bool: - """Render the assistant reply and re-enable the entry.""" - self._set_input_enabled(True) - - if not response: - self._append_message( - "assistant", - "The model returned an empty response.", - persist=True, - ) - return False - - role = response.get("role", "assistant") - content = response.get("content") or "" - if not content: - content = "[No content received from Ollama]" - - self._append_message(role, content, persist=True) - return False diff --git a/streaming_handler.py b/streaming_handler.py new file mode 100644 index 0000000..ece36fb --- /dev/null +++ b/streaming_handler.py @@ -0,0 +1,222 @@ +"""Streaming response handler for progressive token display.""" + +from __future__ import annotations + +from gi.repository import GLib +from ignis import widgets + + +class StreamingHandler: + """Manages streaming response display with token-by-token updates.""" + + def __init__(self, message_widget: widgets.Label, scroller: widgets.Scroll, + thinking_widget: widgets.Label = None, thinking_box: widgets.Box = None): + """Initialize streaming handler. 
+ + Args: + message_widget: The label widget to update with streaming content + scroller: The scroll widget to manage auto-scrolling + thinking_widget: Optional label widget for thinking content + thinking_box: Optional box container for thinking section + """ + self._widget = message_widget + self._thinking_widget = thinking_widget + self._thinking_box = thinking_box + self._scroller = scroller + self._buffer = "" + self._thinking_buffer = "" # Separate buffer for thinking content + self._token_buffer = [] + self._thinking_token_buffer = [] + self._is_streaming = False + self._buffer_size = 3 # Accumulate 3-5 tokens before UI update + + def start_stream(self) -> None: + """Initialize streaming state.""" + self._buffer = "" + self._thinking_buffer = "" + self._token_buffer = [] + self._thinking_token_buffer = [] + self._is_streaming = True + # Set initial text with streaming indicator + self._widget.label = "Assistant: ..." + + def append_token(self, token: str) -> None: + """Add token to buffer and update UI via GLib.idle_add. + + Args: + token: The token string to append + """ + if not self._is_streaming: + return + + # Add token to buffer + self._token_buffer.append(token) + + # Update UI when buffer reaches threshold + if len(self._token_buffer) >= self._buffer_size: + self._flush_buffer() + + def append_thinking_token(self, token: str) -> None: + """Add thinking token to separate buffer. + + Args: + token: The thinking token string to append + """ + if not self._is_streaming: + return + + # Add token to thinking buffer + self._thinking_token_buffer.append(token) + + # Update UI when buffer reaches threshold + if len(self._thinking_token_buffer) >= self._buffer_size: + self._flush_thinking_buffer() + + def _flush_buffer(self) -> None: + """Flush accumulated tokens to UI.""" + if not self._token_buffer: + return + + # Combine buffered tokens + tokens = "".join(self._token_buffer) + self._buffer += tokens + self._token_buffer = [] + + # Schedule UI update on main thread + GLib.idle_add(self._update_ui, priority=GLib.PRIORITY_DEFAULT) + + def _flush_thinking_buffer(self) -> None: + """Flush accumulated thinking tokens to UI.""" + if not self._thinking_token_buffer: + return + + # Combine buffered thinking tokens + tokens = "".join(self._thinking_token_buffer) + self._thinking_buffer += tokens + self._thinking_token_buffer = [] + + # Schedule UI update on main thread + GLib.idle_add(self._update_ui, priority=GLib.PRIORITY_DEFAULT) + + def _update_ui(self) -> bool: + """Update the widget label with current buffer content. + + Returns: + False to indicate this is a one-time callback + """ + # Update thinking widget if present + if self._thinking_widget and self._thinking_buffer: + self._thinking_widget.label = self._thinking_buffer + "..." + if self._thinking_box: + self._thinking_box.set_visible(True) + + # Update main response label with streaming indicator + self._widget.label = f"Assistant: {self._buffer}..." + + # Auto-scroll to bottom + self._scroll_to_bottom() + + return False # Don't repeat this callback + + def _scroll_to_bottom(self) -> None: + """Scroll to the latest content.""" + adjustment = self._scroller.get_vadjustment() + if adjustment: + adjustment.set_value(adjustment.get_upper() - adjustment.get_page_size()) + + def finish_stream(self) -> tuple[str, str]: + """Finalize streaming and return complete content. 
+ + Returns: + Tuple of (thinking_content, main_content) + """ + self._is_streaming = False + + # Flush any remaining tokens + if self._token_buffer: + self._buffer += "".join(self._token_buffer) + self._token_buffer = [] + + if self._thinking_token_buffer: + self._thinking_buffer += "".join(self._thinking_token_buffer) + self._thinking_token_buffer = [] + + # Final UI update without streaming indicator + final_content = self._buffer + final_thinking = self._thinking_buffer + GLib.idle_add(self._finalize_ui, final_thinking, final_content, priority=GLib.PRIORITY_DEFAULT) + + return (final_thinking, final_content) + + def _finalize_ui(self, thinking: str, content: str) -> bool: + """Update UI with final content without streaming indicator. + + Args: + thinking: The final thinking content + content: The final complete content + + Returns: + False to indicate this is a one-time callback + """ + # Update thinking widget if present + if self._thinking_widget and thinking: + self._thinking_widget.label = thinking + if self._thinking_box: + self._thinking_box.set_visible(True) + + # Update main response without streaming indicator + self._widget.label = f"Assistant: {content}" + self._scroll_to_bottom() + return False + + def parse_reasoning_content(self, content: str) -> tuple[str | None, str]: + """Parse reasoning content from response if present. + + Looks for common reasoning patterns like: + - ... or ... tags + - [Reasoning: ...] markers + - Other model-specific formats + + Args: + content: The complete response content + + Returns: + Tuple of (reasoning_content, main_content) + If no reasoning found, returns (None, original_content) + """ + import re + + # Pattern 1a: ... tags (shorter form) + think_pattern = r'(.*?)\s*(.*)' + match = re.search(think_pattern, content, re.DOTALL | re.IGNORECASE) + if match: + reasoning = match.group(1).strip() + main_content = match.group(2).strip() + return (reasoning, main_content) + + # Pattern 1b: ... tags (longer form) + thinking_pattern = r'(.*?)\s*(.*)' + match = re.search(thinking_pattern, content, re.DOTALL | re.IGNORECASE) + if match: + reasoning = match.group(1).strip() + main_content = match.group(2).strip() + return (reasoning, main_content) + + # Pattern 2: [Reasoning: ...] followed by [Answer: ...] 
+ reasoning_pattern = r'\[Reasoning:?\s*(.*?)\]\s*\[Answer:?\s*(.*?)\]' + match = re.search(reasoning_pattern, content, re.DOTALL | re.IGNORECASE) + if match: + reasoning = match.group(1).strip() + main_content = match.group(2).strip() + return (reasoning, main_content) + + # Pattern 3: "Reasoning:" followed by "Answer:" or "Conclusion:" + reasoning_pattern2 = r'Reasoning:\s*(.*?)\s*(?:Answer|Conclusion):\s*(.*)' + match = re.search(reasoning_pattern2, content, re.DOTALL | re.IGNORECASE) + if match: + reasoning = match.group(1).strip() + main_content = match.group(2).strip() + return (reasoning, main_content) + + # No reasoning pattern found + return (None, content) diff --git a/style.css b/style.css new file mode 100644 index 0000000..1b6f341 --- /dev/null +++ b/style.css @@ -0,0 +1,74 @@ +/* AI Sidebar TextView styling */ +textview { + background-color: rgba(255, 255, 255, 0.1); + color: white; + min-height: 40px; +} + +textview text { + background-color: transparent; +} + +.text-view-container { + background-color: rgba(255, 255, 255, 0.05); +} + +scrolledwindow { + background-color: transparent; +} + +frame { + border: 1px solid rgba(255, 255, 255, 0.2); +} + +/* Reasoning/Thinking content styling */ +.thinking-box { + background: linear-gradient(135deg, rgba(100, 150, 255, 0.15) 0%, rgba(80, 120, 200, 0.1) 100%); + border: 1px solid rgba(100, 150, 255, 0.3); + border-left: 4px solid rgba(100, 150, 255, 0.8); + padding: 12px; + margin: 8px 0px 12px 0px; + border-radius: 8px; +} + +.thinking-header-button { + background: transparent; + border: none; + padding: 4px 8px; + margin-bottom: 8px; +} + +.thinking-header-button:hover { + background: rgba(100, 150, 255, 0.1); + border-radius: 4px; +} + +.thinking-icon { + color: rgba(120, 170, 255, 1.0); + font-size: 0.8em; +} + +.thinking-header { + color: rgba(120, 170, 255, 1.0); + font-weight: bold; + font-size: 0.95em; +} + +.thinking-content { + color: rgba(200, 210, 230, 0.9); + font-style: italic; + font-size: 0.9em; + line-height: 1.5; + padding: 8px 4px; +} + +.reasoning-content { + color: rgba(255, 255, 255, 0.6); + font-style: italic; +} + +.reasoning-divider { + border-top: 1px solid rgba(255, 255, 255, 0.2); + margin-top: 8px; + margin-bottom: 8px; +} diff --git a/styles.css b/styles.css deleted file mode 100644 index c31f3df..0000000 --- a/styles.css +++ /dev/null @@ -1,5 +0,0 @@ -/* Global styles placeholder for the AI sidebar. */ - -body { - /* Replace with GTK CSS once widgets are implemented. */ -} diff --git a/tests/__init__.py b/tests/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/tests/test_conversation_manager.py b/tests/test_conversation_manager.py deleted file mode 100644 index 4559a19..0000000 --- a/tests/test_conversation_manager.py +++ /dev/null @@ -1,34 +0,0 @@ -from __future__ import annotations - -import json -from pathlib import Path - -import pytest - -from conversation_manager import ConversationManager - - -def test_conversation_manager_persists_history(tmp_path: Path) -> None: - manager = ConversationManager(storage_dir=tmp_path, conversation_id="test") - manager.append_message("user", "Hello there!") - manager.append_message("assistant", "General Kenobi.") - - conversation_file = tmp_path / "test.json" - assert conversation_file.exists() - - data = json.loads(conversation_file.read_text(encoding="utf-8")) - assert len(data["messages"]) == 2 - assert data["messages"][0]["content"] == "Hello there!" 
- - reloaded = ConversationManager(storage_dir=tmp_path, conversation_id="test") - assert [msg["content"] for msg in reloaded.messages] == [ - "Hello there!", - "General Kenobi.", - ] - - -def test_conversation_manager_rejects_invalid_role(tmp_path: Path) -> None: - manager = ConversationManager(storage_dir=tmp_path, conversation_id="invalid") - - with pytest.raises(ValueError): - manager.append_message("narrator", "This should fail.") diff --git a/tests/test_smoke.py b/tests/test_smoke.py deleted file mode 100644 index f85deee..0000000 --- a/tests/test_smoke.py +++ /dev/null @@ -1,25 +0,0 @@ -from __future__ import annotations - -import os -import subprocess -import sys -from pathlib import Path - - -def test_main_runs_headless() -> None: - repo_root = Path(__file__).resolve().parents[1] - env = os.environ.copy() - env["AI_SIDEBAR_HEADLESS"] = "1" - - result = subprocess.run( - [sys.executable, "main.py"], - cwd=repo_root, - env=env, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - text=True, - check=False, - ) - - assert result.returncode == 0 - assert "Headless mode enabled" in result.stdout