fix: increase timeout and parallelize receipt processing
- ab_ai_bot: raise requests.post timeout 120s -> 600s so long OCR+LLM runs don't silently drop the reply in Discuss
- upload: run parse_upload in ThreadPoolExecutor so tesseract OCR doesn't block the FastAPI event loop
- expenses_agent: parse all receipts concurrently with asyncio.gather (Ollama semaphore caps parallelism at 2); reduces 13-receipt LLM time from ~39s sequential to ~20s parallel

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -102,7 +102,7 @@ class AbAiBot(models.Model):
|
||||
if session_id:
|
||||
payload['session_id'] = session_id
|
||||
try:
|
||||
resp = requests.post(url, json=payload, headers=self._build_headers(), timeout=120)
|
||||
resp = requests.post(url, json=payload, headers=self._build_headers(), timeout=600)
|
||||
resp.raise_for_status()
|
||||
return resp.json()
|
||||
except requests.exceptions.Timeout:
|
||||
@@ -140,7 +140,7 @@ class AbAiBot(models.Model):
|
||||
|
||||
try:
|
||||
resp = requests.post(url, data=form_data, files=files or [('files', ('empty', b'', 'text/plain'))],
|
||||
headers=headers, timeout=120)
|
||||
headers=headers, timeout=600)
|
||||
resp.raise_for_status()
|
||||
return resp.json()
|
||||
except requests.exceptions.Timeout:
|
||||
|
||||
Reference in New Issue
Block a user