refactor(aisidebar): restructure project and implement reasoning mode toggle

- Reorganize project structure and file locations
- Add ReasoningController to manage model selection and reasoning mode
- Update design and requirements for reasoning mode toggle
- Implement model switching between Qwen3-4B-Instruct and Qwen3-4B-Thinking models
- Remove deprecated files and consolidate project layout
- Add new steering and specification documentation
- Clean up and remove unnecessary files and directories
- Prepare for enhanced AI sidebar functionality with more flexible model handling
This commit is contained in:
Melvin Ragusa
2025-10-26 09:10:31 +01:00
parent 58bd935af0
commit 239242e2fc
73 changed files with 3094 additions and 2348 deletions

180
conversation_archive.py Normal file
View File

@@ -0,0 +1,180 @@
"""Conversation archive management for multi-conversation persistence."""
from __future__ import annotations

import hashlib
import json
import secrets
from dataclasses import dataclass
from datetime import datetime, timezone
from pathlib import Path
from typing import List

from .conversation_manager import ConversationState, DEFAULT_CONVERSATION_ID
@dataclass
class ConversationMetadata:
    """Metadata for conversation list display.

    Lightweight summary of one archived conversation, used to render a
    conversation list without loading the full message payload.
    """

    # Unique identifier of the archive; falls back to the JSON filename stem
    # when the payload carries no "id" field.
    archive_id: str
    title: str  # AI-generated or user-provided title
    # Timestamps are stored as strings; presumably ISO-8601 so lexicographic
    # sorting matches chronological order — TODO confirm with the writer side.
    created_at: str
    updated_at: str
    # Total number of messages stored in the archived conversation.
    message_count: int
    preview: str  # First 50 chars of first user message
class ConversationArchive:
    """Manages multiple conversation files with archiving capabilities.

    Each archived conversation is persisted as a single JSON file named
    ``<archive_id>.json`` inside the storage directory.  The active
    conversation file (stem ``DEFAULT_CONVERSATION_ID``) lives in the same
    directory but is excluded from listings.
    """

    def __init__(self, storage_dir: str | Path | None = None) -> None:
        """Create the archive and ensure the storage directory exists.

        Args:
            storage_dir: Directory holding conversation JSON files.
                Defaults to ``<module dir>/data/conversations``.
        """
        if storage_dir is None:
            module_root = Path(__file__).resolve().parent
            storage_dir = module_root / "data" / "conversations"
        self._storage_dir = Path(storage_dir)
        self._storage_dir.mkdir(parents=True, exist_ok=True)

    def generate_archive_id(self) -> str:
        """Create unique archive ID: YYYYMMDD_HHMMSS_<short-hash>.

        Returns:
            A new ID of the form ``archive_YYYYMMDD_HHMMSS_<8 hex digits>``.
            The hash mixes the UTC timestamp with random bytes so that two
            calls within the same microsecond still yield distinct IDs (the
            previous timestamp-only derivation could collide in that case).
        """
        now = datetime.now(timezone.utc)
        timestamp_part = now.strftime("%Y%m%d_%H%M%S")
        # Random salt: hashing the timestamp alone is deterministic and
        # would produce duplicate IDs for back-to-back calls.
        hash_input = f"{now.isoformat()}{secrets.token_hex(8)}".encode("utf-8")
        hash_digest = hashlib.sha256(hash_input).hexdigest()[:8]
        return f"archive_{timestamp_part}_{hash_digest}"

    def archive_conversation(
        self,
        conversation_state: ConversationState,
        archive_id: str | None = None,
        title: str | None = None
    ) -> str:
        """Save conversation with timestamp-based archive ID.

        The payload is written to a temporary sibling file first and then
        atomically renamed, so a crash mid-write can never leave a truncated
        ``*.json`` file for ``list_conversations`` to trip over.

        Args:
            conversation_state: The conversation to archive
            archive_id: Optional custom archive ID, generates one if not provided
            title: Optional title for the conversation

        Returns:
            The archive ID used for the saved conversation
        """
        if archive_id is None:
            archive_id = self.generate_archive_id()
        archive_path = self._storage_dir / f"{archive_id}.json"
        payload = {
            "id": archive_id,
            "title": title or "",
            "created_at": conversation_state.created_at,
            "updated_at": conversation_state.updated_at,
            "messages": conversation_state.messages,
        }
        # Write-then-rename: the ".tmp" suffix keeps the "*.json" glob in
        # list_conversations from ever seeing a partially written file.
        tmp_path = archive_path.with_name(archive_path.name + ".tmp")
        with tmp_path.open("w", encoding="utf-8") as fh:
            json.dump(payload, fh, indent=2, ensure_ascii=False)
        tmp_path.replace(archive_path)
        return archive_id

    @staticmethod
    def _extract_preview(messages: list) -> str:
        """Return up to 50 chars of the first user message.

        Appends ``"..."`` when the message was truncated; returns an empty
        string when no user message exists.
        """
        for msg in messages:
            if msg.get("role") == "user":
                content = msg.get("content", "")
                preview = content[:50]
                if len(content) > 50:
                    preview += "..."
                return preview
        return ""

    def list_conversations(self) -> List[ConversationMetadata]:
        """Return metadata for all saved conversations.

        Scans the storage directory for conversation files and extracts
        metadata.  Excludes the default active conversation file.

        Returns:
            List of ConversationMetadata sorted by updated_at (newest first)
        """
        conversations: List[ConversationMetadata] = []
        for json_file in self._storage_dir.glob("*.json"):
            # Skip the default active conversation
            if json_file.stem == DEFAULT_CONVERSATION_ID:
                continue
            try:
                with json_file.open("r", encoding="utf-8") as fh:
                    payload = json.load(fh)
                archive_id = payload.get("id", json_file.stem)
                title = payload.get("title", "")
                created_at = payload.get("created_at", "")
                updated_at = payload.get("updated_at", created_at)
                messages = payload.get("messages", [])
                conversations.append(
                    ConversationMetadata(
                        archive_id=archive_id,
                        # Fall back to the archive ID when no title is stored.
                        title=title if title else archive_id,
                        created_at=created_at,
                        updated_at=updated_at,
                        message_count=len(messages),
                        preview=self._extract_preview(messages),
                    )
                )
            except (json.JSONDecodeError, OSError, KeyError):
                # Skip corrupted or inaccessible files rather than failing
                # the whole listing.
                continue
        # Newest first; assumes ISO-8601 timestamps, where lexicographic
        # order matches chronological order — TODO confirm writer format.
        conversations.sort(key=lambda c: c.updated_at, reverse=True)
        return conversations

    def load_conversation(self, archive_id: str) -> ConversationState | None:
        """Load archived conversation by ID.

        Args:
            archive_id: The ID of the conversation to load

        Returns:
            ConversationState if found and valid, None otherwise
        """
        archive_path = self._storage_dir / f"{archive_id}.json"
        if not archive_path.exists():
            return None
        try:
            with archive_path.open("r", encoding="utf-8") as fh:
                payload = json.load(fh)
            conversation_id = payload.get("id", archive_id)
            created_at = payload.get(
                "created_at", datetime.now(timezone.utc).isoformat()
            )
            updated_at = payload.get("updated_at", created_at)
            # Keep only well-formed message dicts; silently drop the rest.
            validated_messages = [
                msg
                for msg in payload.get("messages", [])
                if isinstance(msg, dict) and "role" in msg and "content" in msg
            ]
            return ConversationState(
                conversation_id=conversation_id,
                created_at=created_at,
                updated_at=updated_at,
                messages=validated_messages,
            )
        except (json.JSONDecodeError, OSError, KeyError):
            # Handle JSON parsing errors and missing files gracefully
            return None