feat(aisidebar): implement Ollama availability handling and graceful startup
- Add comprehensive Ollama connection error handling strategy
- Implement OllamaClient with non-blocking initialization and connection checks
- Create OllamaAvailabilityMonitor for periodic Ollama connection tracking
- Update design and requirements to support graceful Ollama unavailability
- Add new project structure for AI sidebar module with initial implementation
- Enhance error handling to prevent application crashes when Ollama is not running
- Prepare for future improvements in AI sidebar interaction and resilience
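The OllamaAvailabilityMonitor named in the commit message is not part of the hunks below. As a rough sketch of the periodic connection tracking it describes, assuming a plain-threading design; the class body, interval, and callback names here are illustrative, not taken from the actual module:

import threading


class OllamaAvailabilityMonitor:
    """Illustrative sketch: polls an OllamaClient on a fixed interval."""

    def __init__(self, client, interval_seconds: float = 10.0, on_change=None):
        self._client = client
        self._interval = interval_seconds
        self._on_change = on_change          # called with the new availability state
        self._last_state: bool | None = None
        self._timer: threading.Timer | None = None

    def start(self) -> None:
        self._poll()

    def stop(self) -> None:
        if self._timer is not None:
            self._timer.cancel()

    def _poll(self) -> None:
        # _check_connection() (added in this commit) refreshes the flag without raising
        self._client._check_connection()
        state = self._client.is_available
        if state != self._last_state and self._on_change is not None:
            self._on_change(state)
        self._last_state = state
        # Re-arm as a daemon timer so polling never blocks interpreter exit
        self._timer = threading.Timer(self._interval, self._poll)
        self._timer.daemon = True
        self._timer.start()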
@@ -25,17 +25,39 @@ class OllamaClient:
         self._host = host
         self._client = None
         self._cached_models: list[str] | None = None
+        self._is_available = False

         if ollama is None:
             return

-        if host and hasattr(ollama, "Client"):
-            self._client = ollama.Client(host=host)  # type: ignore[call-arg]
+        # Try to initialize client and check connection
+        try:
+            if host and hasattr(ollama, "Client"):
+                self._client = ollama.Client(host=host)  # type: ignore[call-arg]
+
+            # Test connection by attempting to list models
+            self._check_connection()
+        except Exception:
+            # Silently fail - availability flag remains False
+            pass

     # ------------------------------------------------------------------ helpers
+    def _check_connection(self) -> None:
+        """Check if Ollama is available and update internal flag."""
+        if ollama is None:
+            self._is_available = False
+            return
+
+        try:
+            # Attempt a simple list call to verify connection
+            self._call_sdk("list")  # type: ignore[arg-type]
+            self._is_available = True
+        except Exception:
+            self._is_available = False
+
     @property
     def is_available(self) -> bool:
-        return ollama is not None
+        return self._is_available

     @property
     def default_model(self) -> str | None:
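With the guarded initialization above, constructing the client never raises even while the Ollama server is down. A minimal usage sketch, assuming the constructor takes the host keyword stored in self._host (the URL shown is Ollama's default port, used here for illustration):

client = OllamaClient(host="http://localhost:11434")  # returns normally even if Ollama is down
if client.is_available:
    print("Ollama reachable; models can be listed")
else:
    print("Ollama unreachable; sidebar runs in degraded mode")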
@@ -52,7 +74,13 @@ class OllamaClient:

         try:
             response = self._call_sdk("list")  # type: ignore[arg-type]
+            # Update availability flag on successful call
+            self._is_available = True
+        except OllamaClientError:
+            self._is_available = False
+            return []
         except Exception:
+            self._is_available = False
             return []

         models: list[str] = []
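The enclosing method's name falls outside this hunk; assuming it is something like list_models(), a caller can use the empty-list fallback together with the availability flag to tell "server down" apart from "no models pulled":

models = client.list_models()  # hypothetical name for the method wrapping _call_sdk("list")
if not models:
    reason = "Ollama offline" if not client.is_available else "no models installed"
    print(f"No models shown: {reason}")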
@@ -84,10 +112,16 @@ class OllamaClient:
     ) -> dict[str, str] | None:
         """Execute a blocking chat call against Ollama."""
         if not self.is_available:
-            return {
-                "role": "assistant",
-                "content": "Ollama SDK is not installed; install `ollama` to enable responses.",
-            }
+            if ollama is None:
+                return {
+                    "role": "assistant",
+                    "content": "Ollama SDK is not installed; install `ollama` to enable responses.",
+                }
+            else:
+                return {
+                    "role": "assistant",
+                    "content": "Ollama is not running. Start Ollama with: ollama serve",
+                }

         try:
             result = self._call_sdk(
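Because both failure branches return ordinary assistant messages instead of raising, the sidebar can append them to the conversation like any other reply. A sketch, assuming the public wrapper around this call is named chat() and using an illustrative model name:

transcript: list[dict[str, str]] = []
reply = client.chat(
    model="llama3",  # illustrative model name
    messages=[{"role": "user", "content": "Hello"}],
)
if reply is not None:
    transcript.append(reply)  # error fallbacks render as normal assistant bubbles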
@@ -96,10 +130,19 @@ class OllamaClient:
                 messages=list(messages),
                 stream=False,
             )
+            # Update availability flag on successful call
+            self._is_available = True
         except OllamaClientError as exc:
+            self._is_available = False
             return {
                 "role": "assistant",
-                "content": f"Unable to reach Ollama: {exc}",
+                "content": f"Unable to reach Ollama: {exc}\n\nStart Ollama with: ollama serve",
             }
+        except Exception as exc:
+            self._is_available = False
+            return {
+                "role": "assistant",
+                "content": f"Unable to reach Ollama: {exc}\n\nStart Ollama with: ollama serve",
+            }

         # Handle both dict responses (old SDK) and Pydantic objects (new SDK)
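The context line above refers to the two response shapes returned by different ollama SDK versions: older releases return a plain dict with a nested "message" mapping, while newer ones return a Pydantic ChatResponse object with a .message attribute. A sketch of one way such normalization could look; the helper name is hypothetical and not part of this diff:

def _extract_message(result) -> dict[str, str] | None:
    """Hypothetical helper: normalize old-SDK dicts and new-SDK objects to a plain dict."""
    if isinstance(result, dict):
        message = result.get("message")
        if isinstance(message, dict):
            return {
                "role": message.get("role", "assistant"),
                "content": message.get("content", ""),
            }
        return None
    # Newer SDK versions expose the reply as an attribute on a Pydantic model
    message = getattr(result, "message", None)
    if message is None:
        return None
    return {
        "role": getattr(message, "role", "assistant"),
        "content": getattr(message, "content", "") or "",
    }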