feat: Add live streaming to Chat page

- Replace blocking execute_agent_task() with live subprocess streaming
- Use Popen() to read opencode output line by line in real time
- Send 'chunk' events to the frontend as the agent thinks
- Frontend appends chunks incrementally for a live response (see the consumer sketch below)
- Matches the Orchestrator's streaming UX
- No more waiting for the complete response before seeing output
pdyde 2026-02-21 17:32:37 +01:00
parent d620f496a8
commit e2a853ffde
3 changed files with 408 additions and 7 deletions
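
The frontend changes are in the other two files of this commit and are not shown in this excerpt. As a rough illustration only, a client of the /chat/send endpoint could consume the stream like the sketch below (hypothetical host, port, and payload; assumes the requests library; the actual frontend presumably does the equivalent in JavaScript):

    import json
    import requests

    # Open the SSE stream; stream=True keeps the connection open so frames
    # arrive as the server yields them.
    resp = requests.post(
        'http://localhost:5000/chat/send',
        json={'prompt': 'Hello', 'agent': 'orchestrator'},
        stream=True,
    )
    for raw in resp.iter_lines(decode_unicode=True):
        if not raw or not raw.startswith('data: '):
            continue  # skip the blank separator lines between SSE frames
        event = json.loads(raw[len('data: '):])
        if event['type'] == 'chunk':
            print(event['text'], end='', flush=True)  # append incrementally
        elif event['type'] == 'complete':
            print('\n' + event['message'])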

app.py (66 lines changed)

@@ -2198,7 +2198,7 @@ def chat():
 @app.route('/chat/send', methods=['POST'])
 def chat_send():
-    """Runs an agent and returns the response via Server-Sent Events."""
+    """Runs an agent and returns the response via Server-Sent Events LIVE."""
     data = request.get_json()
     prompt = data.get('prompt', '').strip()
     agent_key = data.get('agent', '').strip()
@@ -2216,17 +2216,69 @@ def chat_send():
     def generate():
         # Send agent info
         yield f"data: {json.dumps({'type': 'agent_selected', 'agent': agent_name, 'agent_key': agent_key})}\n\n"
-        yield f"data: {json.dumps({'type': 'processing', 'message': f'{agent_name} is working...'})}\n\n"
+        yield f"data: {json.dumps({'type': 'processing', 'message': f'{agent_name} is thinking...'})}\n\n"
         try:
-            # Run the agent (with memory and work dir)
-            response = execute_agent_task(agent_key, prompt)
+            # Run the agent live with streaming
+            response_text = ""
-            # Stream the response
-            yield f"data: {json.dumps({'type': 'response', 'text': response})}\n\n"
+            # Prepare the system prompt
+            system_prompt = get_agent_prompt(agent_key)
+            if not system_prompt:
+                yield f"data: {json.dumps({'type': 'error', 'message': f'No system prompt for agent {agent_key}'})}\n\n"
+                return
+            dirs = ensure_agent_structure(agent_key)
+            work_dir = dirs['work_dir']
+            memory_summary = get_agent_memory_summary(agent_key)
+            kb_file = os.path.join(os.path.dirname(__file__), 'diversityball_knowledge.md')
+            team_summary = ""
+            if agent_key == 'orchestrator':
+                team_summary = "\n\n" + get_team_member_summary()
+            full_system = f"""{system_prompt}
+## Your memories:
+{memory_summary}{team_summary}
+## Knowledge base:
+The knowledge base is located at: {kb_file}
+- You have internet access via the WebFetch tool
+- Your working directory: {work_dir}"""
+            combined_message = f"{full_system}\n\n---\n\n{prompt}"
+            model = get_agent_model(agent_key)
+            # Invoke OpenCode with streaming
+            process = subprocess.Popen(
+                ['opencode', 'run', '--model', model, '--format', 'json', combined_message],
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE,
+                text=True,
+                cwd=work_dir
+            )
+            # Read live output and stream it
+            for line in process.stdout:
+                try:
+                    data_json = json.loads(line.strip())
+                    if data_json.get('part', {}).get('type') == 'text':
+                        chunk = data_json.get('part', {}).get('text', '')
+                        response_text += chunk
+                        # Send the chunk to the frontend live
+                        yield f"data: {json.dumps({'type': 'chunk', 'text': chunk})}\n\n"
+                except (json.JSONDecodeError, KeyError):
+                    pass
+            process.wait()
+            # Parse agent commands
+            if response_text:
+                parse_agent_commands(agent_key, response_text)
             # Report success
-            yield f"data: {json.dumps({'type': 'complete', 'message': '✓ Done', 'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M'), 'response': response})}\n\n"
+            yield f"data: {json.dumps({'type': 'complete', 'message': '✓ Done', 'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M'), 'response': response_text})}\n\n"
         except Exception as e:
             logger.error(f"[Chat] Error running {agent_key}: {str(e)}")
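
For reference, the core pattern of the hunk above (line-buffered reads from the Popen stdout pipe, forwarded as they arrive) can be distilled into a standalone generator. This is a sketch, not the committed code: the helper name is made up, stderr handling is simplified, and the 'part' schema mirrors what the diff parses from opencode's JSON output.

    import json
    import subprocess

    def stream_text_chunks(cmd, cwd=None):
        """Yield text chunks from a subprocess that prints one JSON object per line."""
        process = subprocess.Popen(
            cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.DEVNULL,  # simplified; the diff above pipes stderr
            text=True,
            cwd=cwd,
        )
        # Iterating the pipe blocks per line, not for the whole run, which is
        # what makes chunks available to the caller while the child still works.
        for line in process.stdout:
            try:
                obj = json.loads(line)
            except json.JSONDecodeError:
                continue  # ignore non-JSON noise on stdout
            part = obj.get('part', {})
            if part.get('type') == 'text':
                yield part.get('text', '')
        process.wait()

Each chunk yielded here is what the route wraps into a 'chunk' SSE event, so the browser can render partial output long before process.wait() returns.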