fix(agent): handle dict and pydantic shapes from ollama-python

ollama-python 0.3.x returns the response as a dict, while newer releases
return pydantic objects. The backend assumed objects (response.message)
and crashed with AttributeError on every dispatch. Use a helper that
accepts either shape so the code works across versions.
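
For context, a minimal sketch of the two shapes described above (the field names are the ones the backend reads; SimpleNamespace stands in for the real pydantic models and is purely illustrative):

    from types import SimpleNamespace

    # Shape from ollama-python 0.3.x: a plain dict.
    dict_resp = {
        'message': {'content': 'hi', 'tool_calls': None},
        'prompt_eval_count': 12,
        'eval_count': 3,
    }

    # Shape from newer releases: attribute access on pydantic objects
    # (SimpleNamespace is only a stand-in here).
    obj_resp = SimpleNamespace(
        message=SimpleNamespace(content='hi', tool_calls=None),
        prompt_eval_count=12,
        eval_count=3,
    )

    obj_resp.message.content          # works on the object shape
    # dict_resp.message               # AttributeError: 'dict' object has no attribute 'message'
    dict_resp['message']['content']   # the dict shape needs key access instead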
Author: Carlos Garcia
Date: 2026-04-24 23:16:36 -04:00
parent f020d1406a
commit 01adfbfb1a


@@ -35,16 +35,28 @@ class OllamaBackend:
                     raise OllamaUnavailableError(f'Ollama unreachable: {exc}') from exc
                 raise OllamaUnavailableError(f'Ollama error: {exc}') from exc
             ms = int((time.monotonic() - t0) * 1000)
-            msg = response.message
+            # ollama-python returns dicts in 0.3.x, pydantic objects in newer
+            # releases — accept either shape.
+            def _get(obj, key, default=None):
+                if isinstance(obj, dict):
+                    return obj.get(key, default)
+                return getattr(obj, key, default)
+            msg = _get(response, 'message') or {}
+            raw_tool_calls = _get(msg, 'tool_calls')
             tool_calls = None
-            if hasattr(msg, 'tool_calls') and msg.tool_calls:
-                tool_calls = [{'name': tc.function.name, 'arguments': tc.function.arguments}
-                              for tc in msg.tool_calls]
-            tin = response.prompt_eval_count or 0
-            tout = response.eval_count or 0
+            if raw_tool_calls:
+                tool_calls = []
+                for tc in raw_tool_calls:
+                    fn = _get(tc, 'function') or {}
+                    tool_calls.append({
+                        'name': _get(fn, 'name'),
+                        'arguments': _get(fn, 'arguments'),
+                    })
+            tin = _get(response, 'prompt_eval_count') or 0
+            tout = _get(response, 'eval_count') or 0
             logger.info('ollama caller=%s wait_ms=%d inf_ms=%d tin=%d tout=%d',
                         caller, wait_ms, ms, tin, tout)
-            return LLMResponse(content=msg.content or '', tool_calls=tool_calls,
+            return LLMResponse(content=_get(msg, 'content') or '', tool_calls=tool_calls,
                                backend_used='ollama', model_used=self._model,
                                tokens_in=tin, tokens_out=tout, latency_ms=ms)
         finally:
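
As a quick local check of the approach (not part of the commit), the helper can be exercised against both shapes, with SimpleNamespace standing in for the pydantic objects:

    from types import SimpleNamespace

    def _get(obj, key, default=None):
        if isinstance(obj, dict):
            return obj.get(key, default)
        return getattr(obj, key, default)

    dict_resp = {'message': {'content': 'ok'}, 'eval_count': 5}
    obj_resp = SimpleNamespace(message=SimpleNamespace(content='ok'), eval_count=5)

    for resp in (dict_resp, obj_resp):
        msg = _get(resp, 'message') or {}
        assert _get(msg, 'content') == 'ok'
        assert (_get(resp, 'eval_count') or 0) == 5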