HadTavern 0.01: Gemini/Claude fixes; UI _origId reuse; docs; .bat open
This commit is contained in:
@@ -3,13 +3,14 @@ import logging
|
||||
from logging.handlers import RotatingFileHandler
|
||||
import json
|
||||
from urllib.parse import urlsplit, urlunsplit, parse_qsl, urlencode, unquote
|
||||
from fastapi.responses import JSONResponse, HTMLResponse
|
||||
from fastapi.responses import JSONResponse, HTMLResponse, StreamingResponse
|
||||
from fastapi.staticfiles import StaticFiles
|
||||
from pydantic import BaseModel, Field
|
||||
from typing import Any, Dict, List, Literal, Optional
|
||||
from agentui.pipeline.executor import PipelineExecutor
|
||||
from agentui.pipeline.defaults import default_pipeline
|
||||
from agentui.pipeline.storage import load_pipeline, save_pipeline, list_presets, load_preset, save_preset
|
||||
from agentui.common.vendors import detect_vendor
|
||||
|
||||
|
||||
class UnifiedParams(BaseModel):
|
||||
@@ -38,17 +39,6 @@ class UnifiedChatRequest(BaseModel):
|
||||
metadata: Dict[str, Any] = Field(default_factory=dict)
|
||||
|
||||
|
||||
def detect_vendor(payload: Dict[str, Any]) -> str:
|
||||
if "anthropic_version" in payload or payload.get("provider") == "anthropic":
|
||||
return "claude"
|
||||
# Gemini typical payload keys
|
||||
if "contents" in payload or "generationConfig" in payload:
|
||||
return "gemini"
|
||||
# OpenAI typical keys
|
||||
if "messages" in payload or "model" in payload:
|
||||
return "openai"
|
||||
return "unknown"
|
||||
|
||||
|
||||
def normalize_to_unified(payload: Dict[str, Any]) -> UnifiedChatRequest:
|
||||
vendor = detect_vendor(payload)
|
||||
@@ -278,6 +268,34 @@ def create_app() -> FastAPI:
|
||||
logger.addHandler(stream_handler)
|
||||
logger.addHandler(file_handler)
|
||||
|
||||
# --- Simple in-process SSE hub (subscriptions per browser tab) ---
|
||||
import asyncio as _asyncio
|
||||
|
||||
class _SSEHub:
|
||||
def __init__(self) -> None:
|
||||
self._subs: List[_asyncio.Queue] = []
|
||||
|
||||
def subscribe(self) -> _asyncio.Queue:
|
||||
q: _asyncio.Queue = _asyncio.Queue()
|
||||
self._subs.append(q)
|
||||
return q
|
||||
|
||||
def unsubscribe(self, q: _asyncio.Queue) -> None:
|
||||
try:
|
||||
self._subs.remove(q)
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
async def publish(self, event: Dict[str, Any]) -> None:
|
||||
# Fan-out to all subscribers; drop if queue is full
|
||||
for q in list(self._subs):
|
||||
try:
|
||||
await q.put(event)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
_trace_hub = _SSEHub()
|
||||
|
||||
def _mask_headers(h: Dict[str, Any]) -> Dict[str, Any]:
|
||||
# Временно отключаем маскировку Authorization для отладки
|
||||
hidden = {"x-api-key", "cookie"}
|
||||
@@ -369,7 +387,15 @@ def create_app() -> FastAPI:
|
||||
macro_ctx = build_macro_context(unified, incoming=incoming)
|
||||
pipeline = load_pipeline()
|
||||
executor = PipelineExecutor(pipeline)
|
||||
last = await executor.run(macro_ctx)
|
||||
|
||||
async def _trace(evt: Dict[str, Any]) -> None:
|
||||
try:
|
||||
base = {"pipeline_id": pipeline.get("id", "pipeline_editor")}
|
||||
await _trace_hub.publish({**base, **evt})
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
last = await executor.run(macro_ctx, trace=_trace)
|
||||
result = last.get("result") or await execute_pipeline_echo(unified)
|
||||
await _log_response(request, 200, result)
|
||||
return JSONResponse(result)
|
||||
@@ -402,7 +428,13 @@ def create_app() -> FastAPI:
|
||||
macro_ctx = build_macro_context(unified, incoming=incoming)
|
||||
pipeline = load_pipeline()
|
||||
executor = PipelineExecutor(pipeline)
|
||||
last = await executor.run(macro_ctx)
|
||||
async def _trace(evt: Dict[str, Any]) -> None:
|
||||
try:
|
||||
base = {"pipeline_id": pipeline.get("id", "pipeline_editor")}
|
||||
await _trace_hub.publish({**base, **evt})
|
||||
except Exception:
|
||||
pass
|
||||
last = await executor.run(macro_ctx, trace=_trace)
|
||||
result = last.get("result") or await execute_pipeline_echo(unified)
|
||||
await _log_response(request, 200, result)
|
||||
return JSONResponse(result)
|
||||
@@ -431,7 +463,13 @@ def create_app() -> FastAPI:
|
||||
macro_ctx = build_macro_context(unified, incoming=incoming)
|
||||
pipeline = load_pipeline()
|
||||
executor = PipelineExecutor(pipeline)
|
||||
last = await executor.run(macro_ctx)
|
||||
async def _trace(evt: Dict[str, Any]) -> None:
|
||||
try:
|
||||
base = {"pipeline_id": pipeline.get("id", "pipeline_editor")}
|
||||
await _trace_hub.publish({**base, **evt})
|
||||
except Exception:
|
||||
pass
|
||||
last = await executor.run(macro_ctx, trace=_trace)
|
||||
result = last.get("result") or await execute_pipeline_echo(unified)
|
||||
await _log_response(request, 200, result)
|
||||
return JSONResponse(result)
|
||||
@@ -465,7 +503,13 @@ def create_app() -> FastAPI:
|
||||
macro_ctx = build_macro_context(unified, incoming=incoming)
|
||||
pipeline = load_pipeline()
|
||||
executor = PipelineExecutor(pipeline)
|
||||
last = await executor.run(macro_ctx)
|
||||
async def _trace(evt: Dict[str, Any]) -> None:
|
||||
try:
|
||||
base = {"pipeline_id": pipeline.get("id", "pipeline_editor")}
|
||||
await _trace_hub.publish({**base, **evt})
|
||||
except Exception:
|
||||
pass
|
||||
last = await executor.run(macro_ctx, trace=_trace)
|
||||
result = last.get("result") or await execute_pipeline_echo(unified)
|
||||
await _log_response(request, 200, result)
|
||||
return JSONResponse(result)
|
||||
@@ -498,7 +542,13 @@ def create_app() -> FastAPI:
|
||||
macro_ctx = build_macro_context(unified, incoming=incoming)
|
||||
pipeline = load_pipeline()
|
||||
executor = PipelineExecutor(pipeline)
|
||||
last = await executor.run(macro_ctx)
|
||||
async def _trace(evt: Dict[str, Any]) -> None:
|
||||
try:
|
||||
base = {"pipeline_id": pipeline.get("id", "pipeline_editor")}
|
||||
await _trace_hub.publish({**base, **evt})
|
||||
except Exception:
|
||||
pass
|
||||
last = await executor.run(macro_ctx, trace=_trace)
|
||||
result = last.get("result") or await execute_pipeline_echo(unified)
|
||||
await _log_response(request, 200, result)
|
||||
return JSONResponse(result)
|
||||
@@ -532,11 +582,16 @@ def create_app() -> FastAPI:
|
||||
macro_ctx = build_macro_context(unified, incoming=incoming)
|
||||
pipeline = load_pipeline()
|
||||
executor = PipelineExecutor(pipeline)
|
||||
last = await executor.run(macro_ctx)
|
||||
async def _trace(evt: Dict[str, Any]) -> None:
|
||||
try:
|
||||
base = {"pipeline_id": pipeline.get("id", "pipeline_editor")}
|
||||
await _trace_hub.publish({**base, **evt})
|
||||
except Exception:
|
||||
pass
|
||||
last = await executor.run(macro_ctx, trace=_trace)
|
||||
result = last.get("result") or await execute_pipeline_echo(unified)
|
||||
await _log_response(request, 200, result)
|
||||
return JSONResponse(result)
|
||||
|
||||
app.mount("/ui", StaticFiles(directory="static", html=True), name="ui")
|
||||
|
||||
# Admin API для пайплайна
|
||||
@@ -580,6 +635,30 @@ def create_app() -> FastAPI:
|
||||
raise HTTPException(status_code=400, detail="Invalid pipeline format")
|
||||
save_preset(name, payload)
|
||||
return JSONResponse({"ok": True})
|
||||
# --- SSE endpoint for live pipeline trace ---
|
||||
@app.get("/admin/trace/stream")
|
||||
async def sse_trace() -> StreamingResponse:
|
||||
loop = _asyncio.get_event_loop()
|
||||
q = _trace_hub.subscribe()
|
||||
|
||||
async def _gen():
|
||||
try:
|
||||
# warm-up: send a comment to keep connection open
|
||||
yield ":ok\n\n"
|
||||
while True:
|
||||
evt = await q.get()
|
||||
try:
|
||||
line = f"data: {json.dumps(evt, ensure_ascii=False)}\n\n"
|
||||
except Exception:
|
||||
line = "data: {}\n\n"
|
||||
yield line
|
||||
except Exception:
|
||||
pass
|
||||
finally:
|
||||
_trace_hub.unsubscribe(q)
|
||||
|
||||
return StreamingResponse(_gen(), media_type="text/event-stream")
|
||||
|
||||
return app
|
||||
|
||||
|
||||
|
||||
1
agentui/common/__init__.py
Normal file
1
agentui/common/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
__all__ = ["vendors"]
|
||||
43
agentui/common/vendors.py
Normal file
43
agentui/common/vendors.py
Normal file
@@ -0,0 +1,43 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, Dict
|
||||
|
||||
__all__ = ["detect_vendor"]
|
||||
|
||||
|
||||
def detect_vendor(payload: Dict[str, Any] | None) -> str:
|
||||
"""
|
||||
Определение вендора по форме payload.
|
||||
Возвращает одно из: "openai" | "gemini" | "claude" | "unknown".
|
||||
|
||||
Правила (порядок важен):
|
||||
- Anthropic (Claude):
|
||||
* наличие ключа "anthropic_version" (официальный заголовок/поле)
|
||||
* явный маркер provider == "anthropic"
|
||||
- Gemini:
|
||||
* наличие "contents" или "generationConfig" (Google AI Studio / Vertex)
|
||||
- OpenAI:
|
||||
* наличие "messages" или "model"
|
||||
- Фоллбэк: "unknown"
|
||||
"""
|
||||
if not isinstance(payload, dict):
|
||||
return "unknown"
|
||||
|
||||
# Явные подсказки, если заранее указали
|
||||
hint = str(payload.get("vendor_format") or payload.get("vendor") or "").lower()
|
||||
if hint in {"openai", "gemini", "claude"}:
|
||||
return hint
|
||||
|
||||
# Anthropic (Claude)
|
||||
if "anthropic_version" in payload or payload.get("provider") == "anthropic":
|
||||
return "claude"
|
||||
|
||||
# Gemini (Google)
|
||||
if "contents" in payload or "generationConfig" in payload:
|
||||
return "gemini"
|
||||
|
||||
# OpenAI
|
||||
if "messages" in payload or "model" in payload:
|
||||
return "openai"
|
||||
|
||||
return "unknown"
|
||||
@@ -1,175 +1,37 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, Dict, List, Optional
|
||||
from typing import Any, Dict, List, Optional, Callable, Awaitable
|
||||
from urllib.parse import urljoin
|
||||
import json
|
||||
import re
|
||||
import asyncio
|
||||
import time
|
||||
from agentui.providers.http_client import build_client
|
||||
from agentui.common.vendors import detect_vendor
|
||||
from agentui.pipeline.templating import (
|
||||
_OUT_MACRO_RE,
|
||||
_VAR_MACRO_RE,
|
||||
_PROMPT_MACRO_RE,
|
||||
_OUT_SHORT_RE,
|
||||
_BRACES_RE,
|
||||
_split_path,
|
||||
_get_by_path,
|
||||
_stringify_for_template,
|
||||
_deep_find_text,
|
||||
_best_text_from_outputs,
|
||||
render_template_simple,
|
||||
)
|
||||
|
||||
|
||||
# --- Templating helpers ------------------------------------------------------
|
||||
# --- Templating helpers are imported from agentui.pipeline.templating ---
|
||||
|
||||
_OUT_MACRO_RE = re.compile(r"\[\[\s*OUT\s*[:\s]\s*([^\]]+?)\s*\]\]", re.IGNORECASE)
|
||||
_VAR_MACRO_RE = re.compile(r"\[\[\s*VAR\s*[:\s]\s*([^\]]+?)\s*\]\]", re.IGNORECASE)
|
||||
# Unified prompt fragment macro (provider-specific JSON fragment)
|
||||
_PROMPT_MACRO_RE = re.compile(r"\[\[\s*PROMPT\s*\]\]", re.IGNORECASE)
|
||||
# Short form: [[OUT1]] -> best-effort text from node n1
|
||||
_OUT_SHORT_RE = re.compile(r"\[\[\s*OUT\s*(\d+)\s*\]\]", re.IGNORECASE)
|
||||
_BRACES_RE = re.compile(r"\{\{\s*([^}]+?)\s*\}\}")
|
||||
# moved to agentui.pipeline.templating
|
||||
|
||||
# moved to agentui.pipeline.templating
|
||||
|
||||
def _split_path(path: str) -> List[str]:
|
||||
return [p.strip() for p in str(path).split(".") if str(p).strip()]
|
||||
|
||||
|
||||
def _get_by_path(obj: Any, path: Optional[str]) -> Any:
|
||||
if path is None or path == "":
|
||||
return obj
|
||||
cur = obj
|
||||
for seg in _split_path(path):
|
||||
if isinstance(cur, dict):
|
||||
if seg in cur:
|
||||
cur = cur[seg]
|
||||
else:
|
||||
return None
|
||||
elif isinstance(cur, list):
|
||||
try:
|
||||
idx = int(seg)
|
||||
except Exception: # noqa: BLE001
|
||||
return None
|
||||
if 0 <= idx < len(cur):
|
||||
cur = cur[idx]
|
||||
else:
|
||||
return None
|
||||
else:
|
||||
return None
|
||||
return cur
|
||||
|
||||
|
||||
def _stringify_for_template(val: Any) -> str:
|
||||
if val is None:
|
||||
return ""
|
||||
if isinstance(val, bool):
|
||||
# JSON-friendly booleans (useful when embedding into JSON-like templates)
|
||||
return "true" if val else "false"
|
||||
if isinstance(val, (dict, list)):
|
||||
try:
|
||||
return json.dumps(val, ensure_ascii=False)
|
||||
except Exception: # noqa: BLE001
|
||||
return str(val)
|
||||
return str(val)
|
||||
|
||||
|
||||
def _deep_find_text(obj: Any, max_nodes: int = 5000) -> Optional[str]:
|
||||
"""
|
||||
Best-effort поиск первого текстового значения в глубине структуры JSON.
|
||||
Сначала пытаемся по ключам content/text, затем общий обход.
|
||||
"""
|
||||
try:
|
||||
# Быстрые ветки
|
||||
if isinstance(obj, str):
|
||||
return obj
|
||||
if isinstance(obj, dict):
|
||||
c = obj.get("content")
|
||||
if isinstance(c, str):
|
||||
return c
|
||||
t = obj.get("text")
|
||||
if isinstance(t, str):
|
||||
return t
|
||||
parts = obj.get("parts")
|
||||
if isinstance(parts, list) and parts:
|
||||
for p in parts:
|
||||
if isinstance(p, dict) and isinstance(p.get("text"), str):
|
||||
return p.get("text")
|
||||
|
||||
# Общий нерекурсивный обход в ширину
|
||||
queue: List[Any] = [obj]
|
||||
seen = 0
|
||||
while queue and seen < max_nodes:
|
||||
cur = queue.pop(0)
|
||||
seen += 1
|
||||
if isinstance(cur, str):
|
||||
return cur
|
||||
if isinstance(cur, dict):
|
||||
# часто встречающиеся поля
|
||||
for k in ("text", "content"):
|
||||
v = cur.get(k)
|
||||
if isinstance(v, str):
|
||||
return v
|
||||
# складываем все значения
|
||||
for v in cur.values():
|
||||
queue.append(v)
|
||||
elif isinstance(cur, list):
|
||||
for it in cur:
|
||||
queue.append(it)
|
||||
except Exception:
|
||||
pass
|
||||
return None
|
||||
|
||||
|
||||
def _best_text_from_outputs(node_out: Any) -> str:
|
||||
"""
|
||||
Унифицированное извлечение "текста" из выхода ноды.
|
||||
Поддерживает:
|
||||
- PromptTemplate: {"text": ...}
|
||||
- LLMInvoke: {"response_text": ...}
|
||||
- ProviderCall/RawForward: {"result": <provider_json>}, извлекаем текст для openai/gemini/claude
|
||||
- Общий глубокий поиск текста, если специфичные ветки не сработали
|
||||
"""
|
||||
# Строка сразу
|
||||
if isinstance(node_out, str):
|
||||
return node_out
|
||||
|
||||
if not isinstance(node_out, dict):
|
||||
return ""
|
||||
|
||||
# Явные короткие поля
|
||||
if isinstance(node_out.get("response_text"), str) and node_out.get("response_text"):
|
||||
return str(node_out["response_text"])
|
||||
if isinstance(node_out.get("text"), str) and node_out.get("text"):
|
||||
return str(node_out["text"])
|
||||
|
||||
res = node_out.get("result")
|
||||
base = res if isinstance(res, (dict, list)) else node_out
|
||||
|
||||
# OpenAI
|
||||
try:
|
||||
if isinstance(base, dict):
|
||||
ch0 = (base.get("choices") or [{}])[0]
|
||||
msg = ch0.get("message") or {}
|
||||
c = msg.get("content")
|
||||
if isinstance(c, str):
|
||||
return c
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Gemini
|
||||
try:
|
||||
if isinstance(base, dict):
|
||||
cand0 = (base.get("candidates") or [{}])[0]
|
||||
content = cand0.get("content") or {}
|
||||
parts0 = (content.get("parts") or [{}])[0]
|
||||
t = parts0.get("text")
|
||||
if isinstance(t, str):
|
||||
return t
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Claude
|
||||
try:
|
||||
if isinstance(base, dict):
|
||||
blocks = base.get("content") or []
|
||||
texts = [b.get("text") for b in blocks if isinstance(b, dict) and isinstance(b.get("text"), str)]
|
||||
if texts:
|
||||
return "\n".join(texts)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Общий глубокий поиск
|
||||
txt = _deep_find_text(base)
|
||||
return txt or ""
|
||||
# moved to agentui.pipeline.templating
|
||||
|
||||
# moved to agentui.pipeline.templating
|
||||
|
||||
def _extract_out_node_id_from_ref(s: Any) -> Optional[str]:
|
||||
"""
|
||||
@@ -190,12 +52,16 @@ def _resolve_in_value(source: Any, context: Dict[str, Any], values: Dict[str, Di
|
||||
"""
|
||||
Разрешает входные связи/макросы в значение для inputs:
|
||||
- Нестроковые значения возвращаются как есть
|
||||
- "macro:path" → берёт значение из context по точечному пути
|
||||
- "[[VAR:path]]" → берёт значение из context
|
||||
- "[[OUT:nodeId(.path)*]]" → берёт из уже вычисленных выходов ноды
|
||||
- "nodeId(.path)*" → ссылка на выходы ноды
|
||||
- "macro:path" → берёт значение из context по точечному пути
|
||||
- "[[VAR:path]]" → берёт значение из context
|
||||
- "[[OUT:nodeId(.path)*]]" → берёт из уже вычисленных выходов ноды
|
||||
- "nodeId(.path)*" → ссылка на выходы ноды
|
||||
- Если передан список ссылок — вернёт список разрешённых значений
|
||||
- Иначе пытается взять из context по пути; если не найдено, оставляет исходную строку
|
||||
"""
|
||||
# Поддержка массивов ссылок (для multi-depends или будущих списковых входов)
|
||||
if isinstance(source, list):
|
||||
return [_resolve_in_value(s, context, values) for s in source]
|
||||
if not isinstance(source, str):
|
||||
return source
|
||||
s = source.strip()
|
||||
@@ -235,100 +101,7 @@ def _resolve_in_value(source: Any, context: Dict[str, Any], values: Dict[str, Di
|
||||
return ctx_val if ctx_val is not None else source
|
||||
|
||||
|
||||
def render_template_simple(template: str, context: Dict[str, Any], out_map: Dict[str, Any]) -> str:
|
||||
"""
|
||||
Простая подстановка:
|
||||
- {{ path }} — берёт из context (или {{ OUT.node.path }} для выходов)
|
||||
- Поддержка фильтра по умолчанию: {{ path|default(value) }}
|
||||
value может быть числом, строкой ('..'/".."), массивом/объектом в виде литерала.
|
||||
- [[VAR:path]] — берёт из context
|
||||
- [[OUT:nodeId(.path)*]] — берёт из out_map
|
||||
Возвращает строку.
|
||||
"""
|
||||
if template is None:
|
||||
return ""
|
||||
s = str(template)
|
||||
|
||||
# 1) Макросы [[VAR:...]] и [[OUT:...]]
|
||||
def repl_var(m: re.Match) -> str:
|
||||
path = m.group(1).strip()
|
||||
val = _get_by_path(context, path)
|
||||
return _stringify_for_template(val)
|
||||
|
||||
def repl_out(m: re.Match) -> str:
|
||||
body = m.group(1).strip()
|
||||
if "." in body:
|
||||
node_id, rest = body.split(".", 1)
|
||||
node_val = out_map.get(node_id)
|
||||
val = _get_by_path(node_val, rest)
|
||||
else:
|
||||
val = out_map.get(body)
|
||||
return _stringify_for_template(val)
|
||||
|
||||
s = _VAR_MACRO_RE.sub(repl_var, s)
|
||||
s = _OUT_MACRO_RE.sub(repl_out, s)
|
||||
|
||||
# [[OUT1]] → текст из ноды n1 (best-effort)
|
||||
def repl_out_short(m: re.Match) -> str:
|
||||
try:
|
||||
num = int(m.group(1))
|
||||
node_id = f"n{num}"
|
||||
node_out = out_map.get(node_id)
|
||||
txt = _best_text_from_outputs(node_out)
|
||||
return _stringify_for_template(txt)
|
||||
except Exception:
|
||||
return ""
|
||||
s = _OUT_SHORT_RE.sub(repl_out_short, s)
|
||||
|
||||
# [[PROMPT]] expands to raw provider-specific JSON fragment prepared in context["PROMPT"]
|
||||
s = _PROMPT_MACRO_RE.sub(lambda _m: str(context.get("PROMPT") or ""), s)
|
||||
|
||||
# 2) Подстановки {{ ... }} (+ simple default filter)
|
||||
def repl_braces(m: re.Match) -> str:
|
||||
expr = m.group(1).strip()
|
||||
|
||||
def eval_path(p: str) -> Any:
|
||||
p = p.strip()
|
||||
if p.startswith("OUT."):
|
||||
body = p[4:]
|
||||
if "." in body:
|
||||
node_id, rest = body.split(".", 1)
|
||||
node_val = out_map.get(node_id)
|
||||
return _get_by_path(node_val, rest)
|
||||
return out_map.get(body)
|
||||
return _get_by_path(context, p)
|
||||
|
||||
default_match = re.match(r"([^|]+)\|\s*default\((.*)\)\s*$", expr)
|
||||
if default_match:
|
||||
base_path = default_match.group(1).strip()
|
||||
fallback_raw = default_match.group(2).strip()
|
||||
# Снимем внешние кавычки, если это строковый литерал
|
||||
if len(fallback_raw) >= 2 and ((fallback_raw[0] == "'" and fallback_raw[-1] == "'") or (fallback_raw[0] == '"' and fallback_raw[-1] == '"')):
|
||||
fallback_val: Any = fallback_raw[1:-1]
|
||||
else:
|
||||
# Иначе оставляем как есть (числа/массивы/объекты — литералами)
|
||||
fallback_val = fallback_raw
|
||||
raw_val = eval_path(base_path)
|
||||
val = raw_val if raw_val not in (None, "") else fallback_val
|
||||
else:
|
||||
val = eval_path(expr)
|
||||
|
||||
return _stringify_for_template(val)
|
||||
|
||||
s = _BRACES_RE.sub(repl_braces, s)
|
||||
return s
|
||||
def detect_vendor(payload: Dict[str, Any]) -> str:
|
||||
if not isinstance(payload, dict):
|
||||
return "unknown"
|
||||
if "anthropic_version" in payload or payload.get("provider") == "anthropic":
|
||||
return "claude"
|
||||
# Gemini typical payload keys
|
||||
if "contents" in payload or "generationConfig" in payload:
|
||||
return "gemini"
|
||||
# OpenAI typical keys
|
||||
if "messages" in payload or "model" in payload:
|
||||
return "openai"
|
||||
return "unknown"
|
||||
# moved to agentui.pipeline.templating
|
||||
|
||||
|
||||
class ExecutionError(Exception):
|
||||
@@ -360,7 +133,11 @@ class PipelineExecutor:
|
||||
raise ExecutionError(f"Unknown node type: {n.get('type')}")
|
||||
self.nodes_by_id[n["id"]] = node_cls(n["id"], n.get("config", {}))
|
||||
|
||||
async def run(self, context: Dict[str, Any]) -> Dict[str, Any]:
|
||||
async def run(
|
||||
self,
|
||||
context: Dict[str, Any],
|
||||
trace: Optional[Callable[[Dict[str, Any]], Awaitable[None]]] = None,
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Исполнитель пайплайна с динамическим порядком на основе зависимостей графа.
|
||||
Новый режим: волновое (level-by-level) исполнение с параллелизмом и барьером.
|
||||
@@ -378,26 +155,29 @@ class PipelineExecutor:
|
||||
for n in nodes:
|
||||
nid = n["id"]
|
||||
for _, source in (n.get("in") or {}).items():
|
||||
if not isinstance(source, str):
|
||||
# Нестрочные значения считаем константами — зависимостей нет
|
||||
continue
|
||||
if source.startswith("macro:"):
|
||||
# Макросы берутся из контекста, без зависимостей
|
||||
continue
|
||||
# [[VAR:...]] — макрос из контекста, зависимостей нет
|
||||
if re.fullmatch(r"\[\[\s*VAR\s*[:\s]\s*[^\]]+\s*\]\]", source.strip()):
|
||||
continue
|
||||
# [[OUT:nodeId(.key)*]] — зависимость от указанной ноды
|
||||
out_ref_node = _extract_out_node_id_from_ref(source)
|
||||
if out_ref_node and out_ref_node in id_set:
|
||||
deps_map[nid].add(out_ref_node)
|
||||
dependents[out_ref_node].add(nid)
|
||||
continue
|
||||
# Ссылки вида "node.outKey" или "node"
|
||||
src_id = source.split(".", 1)[0] if "." in source else source
|
||||
if src_id in id_set:
|
||||
deps_map[nid].add(src_id)
|
||||
dependents[src_id].add(nid)
|
||||
# Разворачиваем массивы ссылок (multi-depends)
|
||||
sources = source if isinstance(source, list) else [source]
|
||||
for src in sources:
|
||||
if not isinstance(src, str):
|
||||
# Нестрочные значения и массивы констант — зависимостей нет
|
||||
continue
|
||||
if src.startswith("macro:"):
|
||||
# Макросы берутся из контекста, без зависимостей
|
||||
continue
|
||||
# [[VAR:...]] — макрос из контекста, зависимостей нет
|
||||
if re.fullmatch(r"\[\[\s*VAR\s*[:\s]\s*[^\]]+\s*\]\]", src.strip()):
|
||||
continue
|
||||
# [[OUT:nodeId(.key)*]] — зависимость от указанной ноды
|
||||
out_ref_node = _extract_out_node_id_from_ref(src)
|
||||
if out_ref_node and out_ref_node in id_set:
|
||||
deps_map[nid].add(out_ref_node)
|
||||
dependents[out_ref_node].add(nid)
|
||||
continue
|
||||
# Ссылки вида "node.outKey" или "node"
|
||||
src_id = src.split(".", 1)[0] if "." in src else src
|
||||
if src_id in id_set:
|
||||
deps_map[nid].add(src_id)
|
||||
dependents[src_id].add(nid)
|
||||
|
||||
# Входящие степени и первая волна
|
||||
in_degree: Dict[str, int] = {nid: len(deps) for nid, deps in deps_map.items()}
|
||||
@@ -407,6 +187,8 @@ class PipelineExecutor:
|
||||
values: Dict[str, Dict[str, Any]] = {}
|
||||
last_result: Dict[str, Any] = {}
|
||||
node_def_by_id: Dict[str, Dict[str, Any]] = {n["id"]: n for n in nodes}
|
||||
# Накопитель пользовательских переменных (SetVars) — доступен как context["vars"]
|
||||
user_vars: Dict[str, Any] = {}
|
||||
|
||||
# Параметры параллелизма
|
||||
try:
|
||||
@@ -417,49 +199,103 @@ class PipelineExecutor:
|
||||
parallel_limit = 1
|
||||
|
||||
# Вспомогательная корутина исполнения одной ноды со снапшотом OUT
|
||||
async def exec_one(node_id: str, values_snapshot: Dict[str, Any]) -> tuple[str, Dict[str, Any]]:
|
||||
async def exec_one(node_id: str, values_snapshot: Dict[str, Any], wave_num: int) -> tuple[str, Dict[str, Any]]:
|
||||
ndef = node_def_by_id.get(node_id)
|
||||
if not ndef:
|
||||
raise ExecutionError(f"Node definition not found: {node_id}")
|
||||
node = self.nodes_by_id[node_id]
|
||||
|
||||
|
||||
# Снимок контекста и OUT на момент старта волны
|
||||
ctx = dict(context)
|
||||
ctx["OUT"] = values_snapshot
|
||||
|
||||
# Пользовательские переменные (накопленные SetVars)
|
||||
try:
|
||||
ctx["vars"] = dict(user_vars)
|
||||
except Exception:
|
||||
ctx["vars"] = {}
|
||||
|
||||
# Разрешаем inputs для ноды
|
||||
inputs: Dict[str, Any] = {}
|
||||
for name, source in (ndef.get("in") or {}).items():
|
||||
inputs[name] = _resolve_in_value(source, ctx, values_snapshot)
|
||||
if isinstance(source, list):
|
||||
inputs[name] = [_resolve_in_value(s, ctx, values_snapshot) for s in source]
|
||||
else:
|
||||
inputs[name] = _resolve_in_value(source, ctx, values_snapshot)
|
||||
|
||||
out = await node.run(inputs, ctx)
|
||||
return node_id, out
|
||||
# Трассировка старта
|
||||
if trace is not None:
|
||||
try:
|
||||
await trace({"event": "node_start", "node_id": ndef["id"], "wave": wave_num, "ts": int(time.time() * 1000)})
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
started = time.perf_counter()
|
||||
try:
|
||||
out = await node.run(inputs, ctx)
|
||||
except Exception as exc:
|
||||
if trace is not None:
|
||||
try:
|
||||
await trace({
|
||||
"event": "node_error",
|
||||
"node_id": ndef["id"],
|
||||
"wave": wave_num,
|
||||
"ts": int(time.time() * 1000),
|
||||
"error": str(exc),
|
||||
})
|
||||
except Exception:
|
||||
pass
|
||||
raise
|
||||
else:
|
||||
dur_ms = int((time.perf_counter() - started) * 1000)
|
||||
if trace is not None:
|
||||
try:
|
||||
await trace({
|
||||
"event": "node_done",
|
||||
"node_id": ndef["id"],
|
||||
"wave": wave_num,
|
||||
"ts": int(time.time() * 1000),
|
||||
"duration_ms": dur_ms,
|
||||
})
|
||||
except Exception:
|
||||
pass
|
||||
return node_id, out
|
||||
|
||||
# Волновое исполнение
|
||||
wave_idx = 0
|
||||
while ready:
|
||||
wave_nodes = list(ready)
|
||||
ready = [] # будет заполнено после завершения волны
|
||||
wave_results: Dict[str, Dict[str, Any]] = {}
|
||||
# Один общий снапшот OUT для всей волны (барьер — узлы волны не видят результаты друг друга)
|
||||
values_snapshot = dict(values)
|
||||
|
||||
|
||||
# Чанковый запуск с лимитом parallel_limit
|
||||
for i in range(0, len(wave_nodes), parallel_limit):
|
||||
chunk = wave_nodes[i : i + parallel_limit]
|
||||
# fail-fast: при исключении любой задачи gather бросит и отменит остальные
|
||||
results = await asyncio.gather(
|
||||
*(exec_one(nid, values_snapshot) for nid in chunk),
|
||||
*(exec_one(nid, values_snapshot, wave_idx) for nid in chunk),
|
||||
return_exceptions=False,
|
||||
)
|
||||
# Коммитим результаты чанка в локальное хранилище волны
|
||||
for nid, out in results:
|
||||
wave_results[nid] = out
|
||||
last_result = out # обновляем на каждом успешном результате
|
||||
|
||||
|
||||
# После завершения волны — коммитим все её результаты в общие values
|
||||
values.update(wave_results)
|
||||
processed.extend(wave_nodes)
|
||||
|
||||
|
||||
# Соберём пользовательские переменные из SetVars узлов волны
|
||||
try:
|
||||
for _nid, out in wave_results.items():
|
||||
if isinstance(out, dict):
|
||||
v = out.get("vars")
|
||||
if isinstance(v, dict):
|
||||
user_vars.update(v)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Обновляем входящие степени для зависимых и формируем следующую волну
|
||||
for done in wave_nodes:
|
||||
for child in dependents.get(done, ()):
|
||||
@@ -467,6 +303,7 @@ class PipelineExecutor:
|
||||
next_ready = [nid for nid, deg in in_degree.items() if deg == 0 and nid not in processed and nid not in wave_nodes]
|
||||
# Исключаем уже учтённые и добавляем только те, которые действительно готовы
|
||||
ready = next_ready
|
||||
wave_idx += 1
|
||||
|
||||
# Проверка на циклы/недостижимые ноды
|
||||
if len(processed) != len(nodes):
|
||||
@@ -476,6 +313,162 @@ class PipelineExecutor:
|
||||
return last_result
|
||||
|
||||
|
||||
class SetVarsNode(Node):
|
||||
type_name = "SetVars"
|
||||
|
||||
def _normalize(self) -> List[Dict[str, Any]]:
|
||||
raw = self.config.get("variables") or []
|
||||
if not isinstance(raw, list):
|
||||
return []
|
||||
norm: List[Dict[str, Any]] = []
|
||||
for i, b in enumerate(raw):
|
||||
if not isinstance(b, dict):
|
||||
continue
|
||||
name = str(b.get("name", "")).strip()
|
||||
mode = str(b.get("mode", "string")).lower().strip()
|
||||
value = b.get("value", "")
|
||||
try:
|
||||
order = int(b.get("order")) if b.get("order") is not None else i
|
||||
except Exception:
|
||||
order = i
|
||||
norm.append({
|
||||
"id": b.get("id") or f"v{i}",
|
||||
"name": name,
|
||||
"mode": "expr" if mode == "expr" else "string",
|
||||
"value": value,
|
||||
"order": order,
|
||||
})
|
||||
return norm
|
||||
|
||||
def _safe_eval_expr(self, expr: str) -> Any:
|
||||
"""
|
||||
Безопасная оценка выражений для SetVars.
|
||||
|
||||
Поддержка:
|
||||
- Литералы: числа/строки/bool/None, списки, кортежи, словари
|
||||
- JSON‑литералы: true/false/null, объекты и массивы (парсятся как Python True/False/None, dict/list)
|
||||
- Арифметика: + - * / // %, унарные +-
|
||||
- Логика: and/or, сравнения (== != < <= > >=, цепочки)
|
||||
- Безопасные функции: rand(), randint(a,b), choice(list)
|
||||
|
||||
Запрещено: имя/атрибуты/индексация/условные/импорты/прочие вызовы функций.
|
||||
"""
|
||||
import ast
|
||||
import operator as op
|
||||
import random
|
||||
|
||||
# 0) Попытаться распознать чистый JSON‑литерал (включая true/false/null, объекты/массивы/числа/строки).
|
||||
# Это не вмешивается в математику: для выражений вида "1+2" json.loads бросит исключение и мы пойдём в AST.
|
||||
try:
|
||||
s = str(expr).strip()
|
||||
return json.loads(s)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
allowed_bin = {
|
||||
ast.Add: op.add, ast.Sub: op.sub, ast.Mult: op.mul, ast.Div: op.truediv,
|
||||
ast.FloorDiv: op.floordiv, ast.Mod: op.mod,
|
||||
}
|
||||
allowed_unary = {ast.UAdd: lambda x: +x, ast.USub: lambda x: -x}
|
||||
allowed_cmp = {
|
||||
ast.Eq: op.eq, ast.NotEq: op.ne, ast.Lt: op.lt, ast.LtE: op.le, ast.Gt: op.gt, ast.GtE: op.ge,
|
||||
}
|
||||
|
||||
def eval_node(node: ast.AST) -> Any:
|
||||
if isinstance(node, ast.Expression):
|
||||
return eval_node(node.body)
|
||||
if isinstance(node, ast.Constant):
|
||||
return node.value
|
||||
if isinstance(node, ast.Tuple):
|
||||
return tuple(eval_node(e) for e in node.elts)
|
||||
if isinstance(node, ast.List):
|
||||
return [eval_node(e) for e in node.elts]
|
||||
if isinstance(node, ast.Dict):
|
||||
return {eval_node(k): eval_node(v) for k, v in zip(node.keys, node.values)}
|
||||
if isinstance(node, ast.UnaryOp) and type(node.op) in allowed_unary:
|
||||
return allowed_unary[type(node.op)](eval_node(node.operand))
|
||||
if isinstance(node, ast.BinOp) and type(node.op) in allowed_bin:
|
||||
return allowed_bin[type(node.op)](eval_node(node.left), eval_node(node.right))
|
||||
if isinstance(node, ast.BoolOp):
|
||||
vals = [eval_node(v) for v in node.values]
|
||||
if isinstance(node.op, ast.And):
|
||||
res = True
|
||||
for v in vals:
|
||||
res = res and bool(v)
|
||||
return res
|
||||
if isinstance(node.op, ast.Or):
|
||||
res = False
|
||||
for v in vals:
|
||||
res = res or bool(v)
|
||||
return res
|
||||
if isinstance(node, ast.Compare):
|
||||
left = eval_node(node.left)
|
||||
for opnode, comparator in zip(node.ops, node.comparators):
|
||||
if type(opnode) not in allowed_cmp:
|
||||
raise ExecutionError("Unsupported comparison operator")
|
||||
right = eval_node(comparator)
|
||||
if not allowed_cmp[type(opnode)](left, right):
|
||||
return False
|
||||
left = right
|
||||
return True
|
||||
# Разрешённые вызовы: rand(), randint(a,b), choice(list)
|
||||
if isinstance(node, ast.Call):
|
||||
# Никаких kwargs, *args
|
||||
if node.keywords or isinstance(getattr(node, "starargs", None), ast.AST) or isinstance(getattr(node, "kwargs", None), ast.AST):
|
||||
raise ExecutionError("Call with kwargs/starargs is not allowed")
|
||||
fn = node.func
|
||||
if not isinstance(fn, ast.Name):
|
||||
raise ExecutionError("Only simple function calls are allowed")
|
||||
name = fn.id
|
||||
if name == "rand":
|
||||
if len(node.args) != 0:
|
||||
raise ExecutionError("rand() takes no arguments")
|
||||
return random.random()
|
||||
if name == "randint":
|
||||
if len(node.args) != 2:
|
||||
raise ExecutionError("randint(a,b) requires two arguments")
|
||||
a = eval_node(node.args[0])
|
||||
b = eval_node(node.args[1])
|
||||
try:
|
||||
return random.randint(int(a), int(b))
|
||||
except Exception as exc: # noqa: BLE001
|
||||
raise ExecutionError(f"randint invalid arguments: {exc}")
|
||||
if name == "choice":
|
||||
if len(node.args) != 1:
|
||||
raise ExecutionError("choice(list) requires one argument")
|
||||
seq = eval_node(node.args[0])
|
||||
if not isinstance(seq, (list, tuple)):
|
||||
raise ExecutionError("choice() expects list or tuple")
|
||||
if not seq:
|
||||
raise ExecutionError("choice() on empty sequence")
|
||||
return random.choice(seq)
|
||||
raise ExecutionError(f"Function {name} is not allowed")
|
||||
# Запрещаем всё остальное (Name/Attribute/Subscript/IfExp/Comprehensions и пр.)
|
||||
raise ExecutionError("Expression not allowed")
|
||||
|
||||
try:
|
||||
tree = ast.parse(str(expr), mode="eval")
|
||||
except Exception as exc:
|
||||
raise ExecutionError(f"SetVars expr parse error: {exc}") from exc
|
||||
return eval_node(tree)
|
||||
|
||||
async def run(self, inputs: Dict[str, Any], context: Dict[str, Any]) -> Dict[str, Any]:  # noqa: D401
    """Resolve every configured variable and return them as ``{"vars": {...}}``.

    Variables are processed in ascending ``order``. ``expr`` mode goes through
    the sandboxed expression evaluator (``_safe_eval_expr``); any other mode is
    rendered as a template string via ``render_template_simple``.

    Raises:
        ExecutionError: when a variable name is not a plain identifier.
    """
    out_map = context.get("OUT") or {}
    result: Dict[str, Any] = {}
    import re as _re
    for v in sorted(self._normalize(), key=lambda x: x.get("order", 0)):
        name = v.get("name") or ""
        # Only plain identifiers are accepted as variable names.
        if not _re.fullmatch(r"[A-Za-z_][A-Za-z0-9_]*", name or ""):
            raise ExecutionError(f"SetVars invalid variable name: {name!r}")
        mode = v.get("mode", "string")
        raw_val = v.get("value", "")
        if mode == "expr":
            # Sandboxed arithmetic/boolean expression — no arbitrary eval.
            resolved = self._safe_eval_expr(str(raw_val))
        else:
            # Plain string value with macro/template substitution.
            resolved = render_template_simple(str(raw_val or ""), context, out_map)
        result[name] = resolved
    return {"vars": result}
|
||||
|
||||
class ProviderCallNode(Node):
|
||||
type_name = "ProviderCall"
|
||||
|
||||
@@ -520,9 +513,8 @@ class ProviderCallNode(Node):
|
||||
messages: List[Dict[str, Any]] = []
|
||||
for b in blocks:
|
||||
content = render_template_simple(str(b.get("prompt") or ""), context, out_map)
|
||||
# name поля блоков не передаются в провайдерские payload'ы
|
||||
msg = {"role": b["role"], "content": content}
|
||||
if b.get("name"):
|
||||
msg["name"] = b["name"]
|
||||
messages.append(msg)
|
||||
return messages
|
||||
|
||||
@@ -534,7 +526,7 @@ class ProviderCallNode(Node):
|
||||
payload: Dict[str, Any] = {
|
||||
"model": model,
|
||||
"messages": [
|
||||
{k: v for k, v in {"role": m["role"], "content": m["content"], "name": m.get("name")}.items() if v is not None}
|
||||
{"role": m["role"], "content": m["content"]}
|
||||
for m in messages
|
||||
],
|
||||
"temperature": params.get("temperature", 0.7),
|
||||
@@ -582,7 +574,6 @@ class ProviderCallNode(Node):
|
||||
payload: Dict[str, Any] = {
|
||||
"model": model,
|
||||
"messages": msgs,
|
||||
"anthropic_version": context.get("anthropic_version", "2023-06-01"),
|
||||
}
|
||||
if sys_text:
|
||||
payload["system"] = sys_text
|
||||
@@ -611,12 +602,12 @@ class ProviderCallNode(Node):
|
||||
msgs = messages or []
|
||||
|
||||
if provider == "openai":
|
||||
# Уже в формате {"role","content","name?"}
|
||||
# Уже в формате {"role","content"}
|
||||
sys_text = "\n\n".join([m["content"] for m in msgs if m.get("role") == "system"]).strip()
|
||||
# Вставляем как есть (editor будет встраивать JSON массива без кавычек)
|
||||
return {
|
||||
"messages": [
|
||||
{k: v for k, v in {"role": m["role"], "content": m.get("content"), "name": m.get("name")}.items() if v is not None}
|
||||
{"role": m["role"], "content": m.get("content")}
|
||||
for m in msgs
|
||||
],
|
||||
"system_text": sys_text,
|
||||
@@ -630,12 +621,13 @@ class ProviderCallNode(Node):
|
||||
continue
|
||||
role = "model" if m.get("role") == "assistant" else "user"
|
||||
contents.append({"role": role, "parts": [{"text": str(m.get("content") or "")}]})
|
||||
sys_instr = {"parts": [{"text": sys_text}]} if sys_text else {} # всегда корректный JSON-объект
|
||||
return {
|
||||
d: Dict[str, Any] = {
|
||||
"contents": contents,
|
||||
"systemInstruction": sys_instr,
|
||||
"system_text": sys_text,
|
||||
}
|
||||
if sys_text:
|
||||
d["systemInstruction"] = {"parts": [{"text": sys_text}]}
|
||||
return d
|
||||
|
||||
if provider == "claude":
|
||||
sys_text = "\n\n".join([m["content"] for m in msgs if m.get("role") == "system"]).strip()
|
||||
@@ -729,13 +721,23 @@ class ProviderCallNode(Node):
|
||||
# Рендер endpoint с макросами/шаблонами
|
||||
endpoint = render(endpoint_tmpl)
|
||||
|
||||
# Формируем тело ТОЛЬКО из template/[[PROMPT]] (без сырого payload/входов)
|
||||
# Формируем тело ТОЛЬКО из template/[[PROMPT]] (без сырого payload/входов).
|
||||
# Больше НИКАКОГО фоллбэка на unified-построение: если шаблон невалиден — это ошибка ноды.
|
||||
try:
|
||||
rendered = render(template)
|
||||
# DEBUG: печать отрендеренного шаблона с номерами строк для точной диагностики JSONDecodeError
|
||||
try:
|
||||
_lines = rendered.splitlines()
|
||||
_preview = "\n".join(f"{i+1:03d}: {_lines[i]}" for i in range(min(len(_lines), 120)))
|
||||
print(f"DEBUG: ProviderCallNode rendered_template node={self.node_id} provider={provider}\\n{_preview}")
|
||||
except Exception:
|
||||
try:
|
||||
print(f"DEBUG: ProviderCallNode rendered_template(node={self.node_id}, provider={provider}) len={len(rendered)}")
|
||||
except Exception:
|
||||
pass
|
||||
payload = json.loads(rendered)
|
||||
except Exception:
|
||||
# Fallback: используем генерацию из Prompt Blocks в формате провайдера
|
||||
payload = self._messages_to_payload(provider, unified_msgs, context)
|
||||
except Exception as exc: # noqa: BLE001
|
||||
raise ExecutionError(f"ProviderCall template invalid JSON: {exc}")
|
||||
|
||||
# Заголовки — полностью из редактируемого JSON с макросами
|
||||
try:
|
||||
@@ -752,22 +754,55 @@ class ProviderCallNode(Node):
|
||||
url = endpoint if endpoint.startswith("http") else urljoin(base_url.rstrip('/') + '/', endpoint.lstrip('/'))
|
||||
|
||||
# Debug logs to validate config selection and payload
|
||||
# Brute request/response logging (FULL, no masking)
|
||||
try:
|
||||
payload_preview = ""
|
||||
final_headers = {"Content-Type": "application/json", **headers}
|
||||
print("===== ProviderCall REQUEST BEGIN =====")
|
||||
print(f"node={self.node_id} type={self.type_name} provider={provider}")
|
||||
print(f"URL: {url}")
|
||||
try:
|
||||
payload_preview = json.dumps(payload, ensure_ascii=False)[:400]
|
||||
print("Headers:")
|
||||
print(json.dumps(final_headers, ensure_ascii=False, indent=2))
|
||||
except Exception:
|
||||
payload_preview = str(payload)[:400]
|
||||
print(f"DEBUG: ProviderCallNode provider={provider} URL={url}")
|
||||
print(f"DEBUG: ProviderCallNode headers_keys={list(headers.keys())}")
|
||||
print(f"DEBUG: ProviderCallNode payload_preview={payload_preview}")
|
||||
print(f"Headers(raw): {final_headers}")
|
||||
try:
|
||||
print("Body JSON:")
|
||||
print(json.dumps(payload, ensure_ascii=False, indent=2))
|
||||
except Exception:
|
||||
print(f"Body(raw): {payload}")
|
||||
print("===== ProviderCall REQUEST END =====")
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
async with build_client() as client:
|
||||
resp = await client.post(url, json=payload, headers={"Content-Type": "application/json", **headers})
|
||||
resp.raise_for_status()
|
||||
data = resp.json()
|
||||
resp = await client.post(url, json=payload, headers=final_headers)
|
||||
# Do not raise_for_status: keep body/logs on 4xx/5xx
|
||||
try:
|
||||
print("===== ProviderCall RESPONSE BEGIN =====")
|
||||
print(f"node={self.node_id} type={self.type_name} provider={provider}")
|
||||
print(f"Status: {resp.status_code}")
|
||||
try:
|
||||
print("Headers:")
|
||||
print(json.dumps(dict(resp.headers), ensure_ascii=False, indent=2))
|
||||
except Exception:
|
||||
try:
|
||||
print(f"Headers(raw): {dict(resp.headers)}")
|
||||
except Exception:
|
||||
print("Headers(raw): <unavailable>")
|
||||
try:
|
||||
body_text = resp.text
|
||||
except Exception:
|
||||
body_text = "<resp.text decode error>"
|
||||
print("Body Text:")
|
||||
print(body_text)
|
||||
print("===== ProviderCall RESPONSE END =====")
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
try:
|
||||
data = resp.json()
|
||||
except Exception:
|
||||
data = {"error": "Failed to decode JSON from upstream", "text": resp.text}
|
||||
|
||||
# Извлекаем текст best-effort
|
||||
text = None
|
||||
@@ -860,29 +895,143 @@ class RawForwardNode(Node):
|
||||
|
||||
headers.update(extra_headers)
|
||||
|
||||
print(f"DEBUG: RawForwardNode sending request to URL: {url}")
|
||||
print(f"DEBUG: RawForwardNode sending with HEADERS: {headers}")
|
||||
# Brute request/response logging (FULL, no masking)
|
||||
try:
|
||||
print("===== RawForward REQUEST BEGIN =====")
|
||||
print(f"node={self.node_id} type={self.type_name}")
|
||||
print(f"URL: {url}")
|
||||
try:
|
||||
print("Headers:")
|
||||
print(json.dumps(headers, ensure_ascii=False, indent=2))
|
||||
except Exception:
|
||||
print(f"Headers(raw): {headers}")
|
||||
try:
|
||||
print("Body JSON:")
|
||||
print(json.dumps(raw_payload, ensure_ascii=False, indent=2))
|
||||
except Exception:
|
||||
print(f"Body(raw): {raw_payload}")
|
||||
print("===== RawForward REQUEST END =====")
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
async with build_client() as client:
|
||||
resp = await client.post(url, json=raw_payload, headers=headers)
|
||||
|
||||
# Логируем ответ от целевого API для диагностики
|
||||
|
||||
# Response logging
|
||||
try:
|
||||
print("===== RawForward RESPONSE BEGIN =====")
|
||||
print(f"node={self.node_id} type={self.type_name}")
|
||||
print(f"Status: {resp.status_code}")
|
||||
try:
|
||||
print("Headers:")
|
||||
print(json.dumps(dict(resp.headers), ensure_ascii=False, indent=2))
|
||||
except Exception:
|
||||
try:
|
||||
print(f"Headers(raw): {dict(resp.headers)}")
|
||||
except Exception:
|
||||
print("Headers(raw): <unavailable>")
|
||||
try:
|
||||
body_text = resp.text
|
||||
except Exception:
|
||||
body_text = "<resp.text decode error>"
|
||||
print("Body Text:")
|
||||
print(body_text)
|
||||
print("===== RawForward RESPONSE END =====")
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Decode JSON if possible, otherwise return text
|
||||
try:
|
||||
data = resp.json()
|
||||
print(f"DEBUG: RawForwardNode received response. Status: {resp.status_code}, Body: {data}")
|
||||
except Exception:
|
||||
data = {"error": "Failed to decode JSON from upstream", "text": resp.text}
|
||||
print(f"DEBUG: RawForwardNode received non-JSON response. Status: {resp.status_code}, Text: {resp.text}")
|
||||
return {"result": data}
|
||||
|
||||
# Не выбрасываем исключение, а просто проксируем ответ
|
||||
# resp.raise_for_status()
|
||||
|
||||
return {"result": data}
|
||||
|
||||
class ReturnNode(Node):
    """Terminal node: renders a text template and wraps it into a
    provider-shaped (openai/gemini/claude) chat response."""

    type_name = "Return"

    async def run(self, inputs: Dict[str, Any], context: Dict[str, Any]) -> Dict[str, Any]:  # noqa: D401
        """Render ``cfg["text_template"]`` (default ``[[OUT1]]``) and format it
        for the target vendor; returns ``{"result": ..., "response_text": ...}``."""
        # Determine the target response format.
        cfg = self.config or {}
        target = str(cfg.get("target_format", "auto")).lower().strip() or "auto"
        if target == "auto":
            # "auto" follows the vendor format detected for the incoming request.
            target = str(context.get("vendor_format") or "openai").lower().strip() or "openai"

        # Render the text from the template (defaults to [[OUT1]]).
        out_map = context.get("OUT") or {}
        template = cfg.get("text_template")
        if template is None or template == "":
            template = "[[OUT1]]"
        try:
            text = render_template_simple(str(template), context, out_map)
        except Exception:
            # Best-effort: a broken template yields an empty reply, not a crash.
            text = ""

        model = str(context.get("model") or "")

        # Per-provider response formatters (same shapes as execute_pipeline_echo).
        def fmt_openai(t: str) -> Dict[str, Any]:
            # OpenAI chat.completion envelope with a single assistant choice.
            return {
                "id": "ret_mock_123",
                "object": "chat.completion",
                "model": model,
                "choices": [
                    {
                        "index": 0,
                        "message": {"role": "assistant", "content": t},
                        "finish_reason": "stop",
                    }
                ],
                # completion_tokens is a rough word count, not a real token count.
                "usage": {"prompt_tokens": 0, "completion_tokens": len((t or "").split()), "total_tokens": 0},
            }

        def fmt_gemini(t: str) -> Dict[str, Any]:
            # Gemini candidates/parts envelope.
            return {
                "candidates": [
                    {
                        "content": {
                            "role": "model",
                            "parts": [{"text": t}],
                        },
                        "finishReason": "STOP",
                        "index": 0,
                    }
                ],
                "modelVersion": model,
            }

        def fmt_claude(t: str) -> Dict[str, Any]:
            # Anthropic messages envelope with a single text block.
            return {
                "id": "msg_ret_123",
                "type": "message",
                "model": model,
                "role": "assistant",
                "content": [
                    {"type": "text", "text": t}
                ],
                "stop_reason": "end_turn",
            }

        if target == "openai":
            result = fmt_openai(text)
        elif target == "gemini":
            result = fmt_gemini(text)
        elif target == "claude":
            result = fmt_claude(text)
        else:
            # Unknown value — fall back to the safe default (OpenAI shape).
            result = fmt_openai(text)

        return {"result": result, "response_text": text}
|
||||
|
||||
|
||||
|
||||
# Register the nodes defined in this module so the executor can instantiate
# them by their type_name.
NODE_REGISTRY.update({
    SetVarsNode.type_name: SetVarsNode,
    ProviderCallNode.type_name: ProviderCallNode,
    RawForwardNode.type_name: RawForwardNode,
    ReturnNode.type_name: ReturnNode,
})
|
||||
|
||||
|
||||
|
||||
|
||||
308
agentui/pipeline/templating.py
Normal file
308
agentui/pipeline/templating.py
Normal file
@@ -0,0 +1,308 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import re
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
# Public surface of the templating helpers. The compiled regexes are exported
# because other backend modules reuse them for macro detection.
__all__ = [
    "_OUT_MACRO_RE",
    "_VAR_MACRO_RE",
    "_PROMPT_MACRO_RE",
    "_OUT_SHORT_RE",
    "_BARE_MACRO_RE",
    "_BRACES_RE",
    "_split_path",
    "_get_by_path",
    "_stringify_for_template",
    "_deep_find_text",
    "_best_text_from_outputs",
    "render_template_simple",
]

# Regex macros (shared across the backend).
# [[OUT:nodeId.path]] — value from a node's output map.
_OUT_MACRO_RE = re.compile(r"\[\[\s*OUT\s*[:\s]\s*([^\]]+?)\s*\]\]", re.IGNORECASE)
# [[VAR:path]] — value from the execution context.
_VAR_MACRO_RE = re.compile(r"\[\[\s*VAR\s*[:\s]\s*([^\]]+?)\s*\]\]", re.IGNORECASE)
# Single PROMPT fragment (provider-specific JSON fragment).
_PROMPT_MACRO_RE = re.compile(r"\[\[\s*PROMPT\s*\]\]", re.IGNORECASE)
# Short form: [[OUT1]] — best-effort text from node n1.
_OUT_SHORT_RE = re.compile(r"\[\[\s*OUT\s*(\d+)\s*\]\]", re.IGNORECASE)
# Bare variables: [[NAME]] or [[path.to.value]] — looked up in "vars" first,
# then in the context.
_BARE_MACRO_RE = re.compile(r"\[\[\s*([A-Za-z_][A-Za-z0-9_]*(?:\.[^\]]+?)?)\s*\]\]")
# {{ ... }} substitutions (including the simplest |default(...) filter).
_BRACES_RE = re.compile(r"\{\{\s*([^}]+?)\s*\}\}")
|
||||
|
||||
|
||||
def _split_path(path: str) -> List[str]:
|
||||
return [p.strip() for p in str(path).split(".") if str(p).strip()]
|
||||
|
||||
|
||||
def _get_by_path(obj: Any, path: Optional[str]) -> Any:
|
||||
if path is None or path == "":
|
||||
return obj
|
||||
cur = obj
|
||||
for seg in _split_path(path):
|
||||
if isinstance(cur, dict):
|
||||
if seg in cur:
|
||||
cur = cur[seg]
|
||||
else:
|
||||
return None
|
||||
elif isinstance(cur, list):
|
||||
try:
|
||||
idx = int(seg)
|
||||
except Exception: # noqa: BLE001
|
||||
return None
|
||||
if 0 <= idx < len(cur):
|
||||
cur = cur[idx]
|
||||
else:
|
||||
return None
|
||||
else:
|
||||
return None
|
||||
return cur
|
||||
|
||||
|
||||
def _stringify_for_template(val: Any) -> str:
|
||||
if val is None:
|
||||
return ""
|
||||
if isinstance(val, bool):
|
||||
# JSON-friendly booleans (useful when embedding into JSON-like templates)
|
||||
return "true" if val else "false"
|
||||
if isinstance(val, (dict, list)):
|
||||
try:
|
||||
return json.dumps(val, ensure_ascii=False)
|
||||
except Exception: # noqa: BLE001
|
||||
return str(val)
|
||||
return str(val)
|
||||
|
||||
|
||||
def _deep_find_text(obj: Any, max_nodes: int = 5000) -> Optional[str]:
|
||||
"""
|
||||
Best-effort поиск первого текстового значения в глубине структуры JSON.
|
||||
Сначала пытаемся по ключам content/text, затем общий обход.
|
||||
"""
|
||||
try:
|
||||
# Быстрые ветки
|
||||
if isinstance(obj, str):
|
||||
return obj
|
||||
if isinstance(obj, dict):
|
||||
c = obj.get("content")
|
||||
if isinstance(c, str):
|
||||
return c
|
||||
t = obj.get("text")
|
||||
if isinstance(t, str):
|
||||
return t
|
||||
parts = obj.get("parts")
|
||||
if isinstance(parts, list) and parts:
|
||||
for p in parts:
|
||||
if isinstance(p, dict) and isinstance(p.get("text"), str):
|
||||
return p.get("text")
|
||||
|
||||
# Общий нерекурсивный обход в ширину
|
||||
queue: List[Any] = [obj]
|
||||
seen = 0
|
||||
while queue and seen < max_nodes:
|
||||
cur = queue.pop(0)
|
||||
seen += 1
|
||||
if isinstance(cur, str):
|
||||
return cur
|
||||
if isinstance(cur, dict):
|
||||
# часто встречающиеся поля
|
||||
for k in ("text", "content"):
|
||||
v = cur.get(k)
|
||||
if isinstance(v, str):
|
||||
return v
|
||||
# складываем все значения
|
||||
for v in cur.values():
|
||||
queue.append(v)
|
||||
elif isinstance(cur, list):
|
||||
for it in cur:
|
||||
queue.append(it)
|
||||
except Exception:
|
||||
pass
|
||||
return None
|
||||
|
||||
|
||||
def _best_text_from_outputs(node_out: Any) -> str:
|
||||
"""
|
||||
Унифицированное извлечение "текста" из выхода ноды.
|
||||
Поддерживает:
|
||||
- PromptTemplate: {"text": ...}
|
||||
- LLMInvoke: {"response_text": ...}
|
||||
- ProviderCall/RawForward: {"result": <provider_json>}, извлекаем текст для openai/gemini/claude
|
||||
- Общий глубокий поиск текста, если специфичные ветки не сработали
|
||||
"""
|
||||
# Строка сразу
|
||||
if isinstance(node_out, str):
|
||||
return node_out
|
||||
|
||||
if not isinstance(node_out, dict):
|
||||
return ""
|
||||
|
||||
# Явные короткие поля
|
||||
if isinstance(node_out.get("response_text"), str) and node_out.get("response_text"):
|
||||
return str(node_out["response_text"])
|
||||
if isinstance(node_out.get("text"), str) and node_out.get("text"):
|
||||
return str(node_out["text"])
|
||||
|
||||
res = node_out.get("result")
|
||||
base = res if isinstance(res, (dict, list)) else node_out
|
||||
|
||||
# OpenAI
|
||||
try:
|
||||
if isinstance(base, dict):
|
||||
ch0 = (base.get("choices") or [{}])[0]
|
||||
msg = ch0.get("message") or {}
|
||||
c = msg.get("content")
|
||||
if isinstance(c, str):
|
||||
return c
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Gemini
|
||||
try:
|
||||
if isinstance(base, dict):
|
||||
cand0 = (base.get("candidates") or [{}])[0]
|
||||
content = cand0.get("content") or {}
|
||||
parts0 = (content.get("parts") or [{}])[0]
|
||||
t = parts0.get("text")
|
||||
if isinstance(t, str):
|
||||
return t
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Claude
|
||||
try:
|
||||
if isinstance(base, dict):
|
||||
blocks = base.get("content") or []
|
||||
texts = [b.get("text") for b in blocks if isinstance(b, dict) and isinstance(b.get("text"), str)]
|
||||
if texts:
|
||||
return "\n".join(texts)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Общий глубокий поиск
|
||||
txt = _deep_find_text(base)
|
||||
return txt or ""
|
||||
|
||||
|
||||
def render_template_simple(template: str, context: Dict[str, Any], out_map: Dict[str, Any]) -> str:
    """
    Simple substitution:
    - ``{{ path }}`` — taken from *context* (or ``{{ OUT.node.path }}`` for node outputs)
    - supports a default filter: ``{{ path|default(value) }}``
      where *value* may be a number, a quoted string ('..'/".."), or an
      array/object literal
    - ``[[VAR:path]]`` — taken from *context*
    - ``[[OUT:nodeId(.path)*]]`` — taken from *out_map*
    Returns a string. Substitution passes run in a fixed order:
    [[VAR]]/[[OUT]] macros, then [[OUTn]] short form, [[PROMPT]],
    bare [[NAME]] macros, and finally {{ ... }} expressions.
    """
    if template is None:
        return ""
    s = str(template)

    # 1) [[VAR:...]] and [[OUT:...]] macros.
    def repl_var(m: re.Match) -> str:
        path = m.group(1).strip()
        val = _get_by_path(context, path)
        return _stringify_for_template(val)

    def repl_out(m: re.Match) -> str:
        body = m.group(1).strip()
        if "." in body:
            # First segment is the node id, the rest is a path into its output.
            node_id, rest = body.split(".", 1)
            node_val = out_map.get(node_id)
            val = _get_by_path(node_val, rest)
        else:
            val = out_map.get(body)
        return _stringify_for_template(val)

    s = _VAR_MACRO_RE.sub(repl_var, s)
    s = _OUT_MACRO_RE.sub(repl_out, s)

    # [[OUT1]] → best-effort text from node n1.
    def repl_out_short(m: re.Match) -> str:
        try:
            num = int(m.group(1))
            node_id = f"n{num}"
            node_out = out_map.get(node_id)
            txt = _best_text_from_outputs(node_out)
            return _stringify_for_template(txt)
        except Exception:
            return ""
    s = _OUT_SHORT_RE.sub(repl_out_short, s)

    # [[PROMPT]] — provider-specific JSON fragment prepared in context["PROMPT"].
    s = _PROMPT_MACRO_RE.sub(lambda _m: str(context.get("PROMPT") or ""), s)

    # 1.5) Bare [[NAME]] / [[path.to.value]] macros.
    def repl_bare(m: re.Match) -> str:
        name = m.group(1).strip()
        # Reserved forms were handled above; carefully skip look-alikes.
        if name.upper() in {"OUT", "VAR", "PROMPT"} or re.fullmatch(r"OUT\d+", name.upper() or ""):
            return m.group(0)
        # User variables first.
        vmap = context.get("vars") or {}
        if isinstance(vmap, dict) and name in vmap:
            return _stringify_for_template(vmap.get(name))
        # Then a path into the general context.
        val = _get_by_path(context, name)
        return _stringify_for_template(val)
    s = _BARE_MACRO_RE.sub(repl_bare, s)

    # 2) {{ ... }} substitutions (+ simple default filter).
    def repl_braces(m: re.Match) -> str:
        expr = m.group(1).strip()

        def eval_path(p: str) -> Any:
            p = p.strip()
            # User variables take priority for simple identifiers {{ NAME }}.
            vmap = context.get("vars") or {}
            if re.fullmatch(r"[A-Za-z_][A-Za-z0-9_]*", p) and isinstance(vmap, dict) and p in vmap:
                return vmap.get(p)
            if p.startswith("OUT."):
                body = p[4:]
                if "." in body:
                    node_id, rest = body.split(".", 1)
                    node_val = out_map.get(node_id)
                    return _get_by_path(node_val, rest)
                return out_map.get(body)
            return _get_by_path(context, p)

        default_match = re.match(r"([^|]+)\|\s*default\((.*)\)\s*$", expr)
        if default_match:
            base_path = default_match.group(1).strip()
            fallback_raw = default_match.group(2).strip()

            # Recursive handling of nested default(...) filters and paths.
            def eval_default(raw: str) -> Any:
                raw = raw.strip()
                # Nested default: a|default(b)
                dm = re.match(r"([^|]+)\|\s*default\((.*)\)\s*$", raw)
                if dm:
                    base2 = dm.group(1).strip()
                    fb2 = dm.group(2).strip()
                    v2 = eval_path(base2)
                    if v2 not in (None, ""):
                        return v2
                    return eval_default(fb2)
                # Try it as a path.
                v = eval_path(raw)
                if v not in (None, ""):
                    return v
                # Explicitly quoted string.
                if len(raw) >= 2 and ((raw[0] == '"' and raw[-1] == '"') or (raw[0] == "'" and raw[-1] == "'")):
                    return raw[1:-1]
                # Try to parse as a JSON literal (number/object/array/true/false/null).
                try:
                    return json.loads(raw)
                except Exception:
                    # Last resort: return the raw text. For strings, prefer
                    # default('...') with quotes.
                    return raw

            raw_val = eval_path(base_path)
            val = raw_val if raw_val not in (None, "") else eval_default(fallback_raw)
        else:
            val = eval_path(expr)

        return _stringify_for_template(val)

    s = _BRACES_RE.sub(repl_braces, s)
    return s
|
||||
Reference in New Issue
Block a user