feat: Implement LLM streaming support and enhance event handling in review process
@@ -315,6 +315,17 @@ class ReviewerAgent:
                 print(f"  ⚠️ SKIP: patch is empty or too small")
                 continue
 
+            # Callback for LLM streaming
+            async def on_llm_chunk(chunk: str, file: str):
+                """Handle LLM streaming chunks"""
+                if self._stream_callback:
+                    await self._stream_callback({
+                        "type": "llm_chunk",
+                        "chunk": chunk,
+                        "file_path": file,
+                        "message": chunk
+                    })
+
             # Analyze diff with PR context
             pr_info = state.get("pr_info", {})
             comments = await self.analyzer.analyze_diff(
@@ -322,7 +333,8 @@ class ReviewerAgent:
                 diff=patch,
                 language=language,
                 pr_title=pr_info.get("title", ""),
-                pr_description=pr_info.get("description", "")
+                pr_description=pr_info.get("description", ""),
+                on_llm_chunk=on_llm_chunk
             )
 
             print(f"  💬 Comments received: {len(comments)}")
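To make the new data flow concrete, the sketch below mirrors the callback chain the diff wires up: analyze_diff receives an on_llm_chunk coroutine, calls it once per streamed chunk, and the coroutine repackages each chunk as an "llm_chunk" event for the agent's _stream_callback. The FakeAnalyzer class, its file_path parameter, and the in-memory stream_callback are illustrative assumptions, not the project's actual DiffAnalyzer or ReviewerAgent API; only the on_llm_chunk signature and the event payload come from the diff above.

import asyncio
from typing import Awaitable, Callable, Optional

# Type of the per-chunk callback: (chunk, file) -> awaitable
ChunkCallback = Callable[[str, str], Awaitable[None]]


class FakeAnalyzer:
    """Stand-in for the project's analyzer; only the streaming hook is modeled."""

    async def analyze_diff(self, diff: str, file_path: str,
                           on_llm_chunk: Optional[ChunkCallback] = None) -> list:
        # Pretend the LLM streams its review token by token.
        for chunk in ["Consider ", "renaming ", "this ", "variable."]:
            if on_llm_chunk:
                await on_llm_chunk(chunk, file_path)
            await asyncio.sleep(0)  # yield control, as a real network stream would
        return ["Consider renaming this variable."]


async def main() -> None:
    events = []

    # Plays the role of ReviewerAgent._stream_callback; the real agent would
    # forward each event to its websocket / SSE subscribers instead.
    async def stream_callback(event: dict) -> None:
        events.append(event)

    async def on_llm_chunk(chunk: str, file: str) -> None:
        await stream_callback({
            "type": "llm_chunk",
            "chunk": chunk,
            "file_path": file,
            "message": chunk,
        })

    analyzer = FakeAnalyzer()
    comments = await analyzer.analyze_diff(
        "@@ -1 +1 @@", "app/models.py", on_llm_chunk=on_llm_chunk
    )
    print(f"streamed {len(events)} chunks, received {len(comments)} comments")


asyncio.run(main())

Passing the callback as a keyword argument keeps the analyzer stateless across files, which is presumably why the commit adds it to the analyze_diff call rather than to the analyzer's constructor.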