v4.1.0: DMS email documents, category-specific Nachweis linking, version system
Some checks failed
CI/CD Pipeline / test (push) Has been cancelled
CI/CD Pipeline / deploy (push) Has been cancelled
Code Quality / quality (push) Has been cancelled

- Save cover email body as DMS document with new 'email' context type
- Show email body separately from attachments in email detail view
- Add per-category DMS document assignment in quarterly confirmation
  (Studiennachweis, Einkommenssituation, Vermögenssituation)
- Add VERSION file and context processor for automatic version display
- Add MCP server, agent system, import/export, and new migrations
- Update compose files and production environment template

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
SysAdmin Agent
2026-03-15 18:48:52 +00:00
parent faeb7c1073
commit e0b377014c
49 changed files with 5913 additions and 55 deletions

View File

@@ -0,0 +1,201 @@
"""
ReAct-Orchestrator für den AI Agent.
Implementiert einen synchronen ReAct-Loop (Reason + Act) mit:
- max. 5 Iterationen
- Tool-Calling
- Streaming via Generator
- Audit-Logging
"""
from __future__ import annotations
import json
import logging
from typing import Generator
from .providers import get_provider, LLMError
from .tools import execute_tool, TOOL_SCHEMAS
logger = logging.getLogger(__name__)
MAX_ITERATIONS = 5
def run_agent_stream(
    session,
    user_message: str,
    page_context: str = "",
    user=None,
) -> Generator[str, None, None]:
    """
    Run the ReAct loop (max. MAX_ITERATIONS rounds) and stream
    SSE-compatible data strings.

    Yield format (Server-Sent Events):
        "data: {json}\n\n"

    JSON payload types:
        {"type": "text", "content": "..."}      text fragment
        {"type": "tool_start", "name": "..."}   a tool is being invoked
        {"type": "tool_result", "name": "...", "result": "..."}
        {"type": "done"}
        {"type": "error", "message": "..."}

    Args:
        session: Chat session object; must expose ``messages`` (a related
            manager), ``title``, ``updated_at`` and ``save()``.
        user_message: The new user prompt to answer.
        page_context: Optional description of the page the user is on;
            appended to the system prompt when non-empty.
        user: Optional user object, passed through to ``execute_tool`` for
            permission checks.
    """
    from .models import AgentConfig, ChatMessage
    config = AgentConfig.get_config()
    # Build the system context; the page context is appended verbatim.
    system_content = config.system_prompt
    if page_context:
        system_content += f"\n\nAktueller Seitenkontext:\n{page_context}"
    # Load the message history (latest 20, excluding tool messages),
    # then restore chronological order for the LLM context.
    history = list(
        session.messages.exclude(role="tool")
        .order_by("-created_at")[:20]
    )
    history.reverse()
    messages = [{"role": "system", "content": system_content}]
    for msg in history:
        if msg.role in ("user", "assistant"):
            messages.append({"role": msg.role, "content": msg.content})
    # Append the new user message to the LLM context ...
    messages.append({"role": "user", "content": user_message})
    # ... and persist it to the database.
    ChatMessage.objects.create(
        session=session,
        role="user",
        content=user_message,
    )
    # Set the session title from the first message if it is still empty.
    if not session.title and user_message:
        session.title = user_message[:100]
        session.save(update_fields=["title", "updated_at"])
    # FIX: the original expression
    #   TOOL_SCHEMAS if not getattr(config, "allow_write", False) else TOOL_SCHEMAS
    # had identical branches, so the allow_write check was a no-op.
    # Behavior is preserved by always using TOOL_SCHEMAS.
    # NOTE(review): if allow_write was meant to gate write-capable tools,
    # a filtered schema list is still missing here — confirm intent.
    tools = TOOL_SCHEMAS
    try:
        provider = get_provider(config)
    except LLMError as e:
        yield _sse({"type": "error", "message": str(e)})
        return
    full_assistant_text = ""
    iteration = 0
    tools_disabled = False
    while iteration < MAX_ITERATIONS:
        iteration += 1
        text_buffer = ""
        pending_tool_calls = []
        current_tools = None if tools_disabled else tools
        try:
            for chunk in provider.chat_stream(messages=messages, tools=current_tools):
                chunk_type = chunk.get("type")
                if chunk_type == "text":
                    text = chunk["content"]
                    text_buffer += text
                    full_assistant_text += text
                    yield _sse({"type": "text", "content": text})
                elif chunk_type == "tool_call":
                    pending_tool_calls.append(chunk)
                elif chunk_type == "done":
                    break
                elif chunk_type == "error":
                    yield _sse({"type": "error", "message": chunk.get("message", "Unbekannter Fehler")})
                    return
        except LLMError as e:
            if not tools_disabled and iteration == 1:
                # Tool calling crashed the provider (e.g. OOM).
                # Fallback: retry once without tools.
                # Sleep briefly so Ollama can restart after an OOM crash.
                # NOTE(review): any text already streamed before the crash was
                # yielded to the client and is repeated after the retry.
                import time
                logger.warning("LLM-Fehler mit Tools, Fallback auf Chat-only: %s", e)
                tools_disabled = True
                full_assistant_text = ""
                time.sleep(15)
                continue
            yield _sse({"type": "error", "message": str(e)})
            return
        if not pending_tool_calls:
            # No tool call -> the answer is complete.
            break
        # Process tool calls: first record the assistant message with its
        # tool_calls in the LLM history (OpenAI-style message format).
        tool_calls_for_msg = []
        for tc in pending_tool_calls:
            tool_calls_for_msg.append({
                "id": tc["id"],
                "type": "function",
                "function": {
                    "name": tc["name"],
                    "arguments": json.dumps(tc["arguments"], ensure_ascii=False),
                },
            })
        assistant_msg: dict = {"role": "assistant", "content": text_buffer or ""}
        if tool_calls_for_msg:
            assistant_msg["tool_calls"] = tool_calls_for_msg
        messages.append(assistant_msg)
        # Execute every tool call in order.
        for tc in pending_tool_calls:
            tool_name = tc["name"]
            tool_args = tc["arguments"]
            tool_call_id = tc["id"]
            yield _sse({"type": "tool_start", "name": tool_name})
            result = execute_tool(tool_name, tool_args, user)
            # Persist the tool result to the database.
            ChatMessage.objects.create(
                session=session,
                role="tool",
                content=result,
                tool_name=tool_name,
                tool_call_id=tool_call_id,
            )
            # Only the first 500 characters are streamed to the client.
            yield _sse({"type": "tool_result", "name": tool_name, "result": result[:500]})
            # Feed the full tool result back for the next LLM iteration.
            messages.append({
                "role": "tool",
                "tool_call_id": tool_call_id,
                "content": result,
            })
        full_assistant_text = ""  # Reset for the next iteration
    # Persist the final assistant message (text of the last iteration only).
    if full_assistant_text:
        ChatMessage.objects.create(
            session=session,
            role="assistant",
            content=full_assistant_text,
        )
    # Touch the session's updated_at timestamp.
    from django.utils import timezone
    session.updated_at = timezone.now()
    session.save(update_fields=["updated_at"])
    yield _sse({"type": "done"})
def _sse(data: dict) -> str:
"""Formatiert ein Dict als SSE data-Zeile."""
return f"data: {json.dumps(data, ensure_ascii=False)}\n\n"