feat: Implement LLM streaming support and enhance event handling in review process

Primakov Alexandr Alexandrovich
2025-10-13 17:48:03 +03:00
parent 2f29ccff74
commit 1d953f554b
6 changed files with 107 additions and 45 deletions
+26 -6
@@ -99,7 +99,8 @@ class CodeAnalyzer:
         diff: str,
         language: Optional[str] = None,
         pr_title: str = "",
-        pr_description: str = ""
+        pr_description: str = "",
+        on_llm_chunk: Optional[callable] = None
     ) -> List[Dict[str, Any]]:
         """Analyze code diff and return comments"""
@@ -154,13 +155,32 @@ class CodeAnalyzer:
         try:
             print(f"\n⏳ Sending request to Ollama ({self.llm.model})...")

-            # Build a chain from the LLM and the JSON parser
-            chain = self.llm | self.json_parser
+            # Accumulate the full response from streaming chunks
+            full_response = ""
+            chunk_count = 0

-            # Get the result
-            result = await chain.ainvoke(prompt)
+            print(f"\n🤖 STREAMING AI response:")
+            print("-" * 80)

-            print(f"\n🤖 AI RESPONSE (parsed via JsonOutputParser):")
+            # Use streaming
+            async for chunk in self.llm.astream(prompt):
+                chunk_count += 1
+                full_response += chunk
+                # Forward the chunk through the callback
+                if on_llm_chunk:
+                    await on_llm_chunk(chunk, file_path)
+                # Echo to the console
+                print(chunk, end='', flush=True)
+            print("\n" + "-" * 80)
+            print(f"✅ Received {chunk_count} chunks, {len(full_response)} characters total")
+            # Parse the final result
+            result = self.json_parser.parse(full_response)
+            print(f"\n🤖 PARSED result:")
+            print("-" * 80)
+            print(json.dumps(result, ensure_ascii=False, indent=2)[:500] + "...")
+            print("-" * 80)