sync: mnogo

This commit is contained in:
2025-10-03 21:55:24 +03:00
parent 2abfbb4b1a
commit 86182c0808
22 changed files with 4462 additions and 1469 deletions

View File

@@ -15,6 +15,23 @@ from agentui.pipeline.storage import load_pipeline, save_pipeline, list_presets,
from agentui.common.vendors import detect_vendor
from agentui.common.cancel import request_cancel, clear_cancel, is_cancelled
from agentui.pipeline.templating import render_template_simple
# Manual resend support: use http client builder and executor helpers to sanitize/lookup originals
from agentui.providers.http_client import build_client
from agentui.pipeline.executor import (
_sanitize_b64_for_log as _san_b64,
_sanitize_json_string_for_log as _san_json_str,
get_http_request as _get_http_req,
)
from agentui.common.manual_http import (
parse_editable_http,
dedupe_headers,
content_type_is_json,
normalize_jsonish_text,
extract_json_trailing,
try_parse_json,
salvage_json_for_send,
register_manual_request,
)
class UnifiedParams(BaseModel):
@@ -744,6 +761,9 @@ def create_app() -> FastAPI:
# --- Manual cancel/clear for pipeline execution ---
@app.post("/admin/cancel")
async def admin_cancel() -> JSONResponse:
"""
Graceful cancel: do not interrupt in-flight operations; stop before next step.
"""
try:
p = load_pipeline()
pid = p.get("id", "pipeline_editor")
@@ -751,10 +771,27 @@ def create_app() -> FastAPI:
p = default_pipeline()
pid = p.get("id", "pipeline_editor")
try:
request_cancel(pid)
request_cancel(pid, mode="graceful")
except Exception:
pass
return JSONResponse({"ok": True, "pipeline_id": pid, "cancelled": True})
return JSONResponse({"ok": True, "pipeline_id": pid, "cancelled": True, "mode": "graceful"})
@app.post("/admin/cancel/abort")
async def admin_cancel_abort() -> JSONResponse:
"""
Hard abort: attempt to interrupt in-flight operations immediately.
"""
try:
p = load_pipeline()
pid = p.get("id", "pipeline_editor")
except Exception:
p = default_pipeline()
pid = p.get("id", "pipeline_editor")
try:
request_cancel(pid, mode="abort")
except Exception:
pass
return JSONResponse({"ok": True, "pipeline_id": pid, "cancelled": True, "mode": "abort"})
@app.post("/admin/cancel/clear")
async def admin_cancel_clear() -> JSONResponse:
@@ -770,7 +807,380 @@ def create_app() -> FastAPI:
pass
return JSONResponse({"ok": True, "pipeline_id": pid, "cancelled": False})
# --- SSE endpoint for live pipeline trace ---
# --- Manual HTTP resend endpoint (Burp-like Repeater for Logs) -----------------
@app.post("/admin/http/manual-send")
async def manual_send(request: Request) -> JSONResponse:
"""
Re-send an HTTP request from Logs with optional edits from UI.
Accepts JSON:
{
"req_id": "original-req-id", // required to fetch original (untrimmed) body if available
"request_text": "METHOD URL HTTP/1.1\\nH: V\\n\\n{...}", // optional raw edited HTTP text from UI
"prefer_registry_original": true, // use untrimmed original JSON body where possible
// Optional explicit overrides (take precedence over parsed request_text):
"method": "POST",
"url": "https://example/api",
"headers": { "Authorization": "Bearer [[VAR:incoming.headers.authorization]]" },
"body_text": "{...}" // explicit body text override (string)
}
Behavior:
- Parses request_text into method/url/headers/body if provided.
- Looks up original untrimmed body_json by req_id from executor registry.
- If prefer_registry_original and the edited body parses as JSON: deep-merge it onto the original JSON with trimmed-base64 preservation (only keys present in the edit are kept; lists are limited to the edited length).
- If prefer_registry_original and the edited body contains human preview fragments (e.g. trimmed) or fails JSON parse: try to extract the trailing JSON object from the text; if that also fails, the original body is NOT resurrected and the raw edited text is sent as-is.
- Resolves [[...]] and {{ ... }} macros (URL/headers/body) against last STORE snapshot (vars + snapshot.OUT/etc) of the pipeline.
- Emits http_req/http_resp SSE with a fresh req_id ('manual-<ts>') so the original log is never overwritten.
"""
try:
payload = await request.json()
except Exception:
payload = {}
# Parse edited HTTP text (Request area)
def _parse_http_text(s: str) -> tuple[str, str, Dict[str, str], str]:
method, url = "POST", ""
headers: Dict[str, str] = {}
body = ""
try:
if not isinstance(s, str) or not s.strip():
return method, url, headers, body
txt = s.replace("\r\n", "\n")
lines = txt.split("\n")
if not lines:
return method, url, headers, body
first = (lines[0] or "").strip()
import re as _re
m = _re.match(r"^([A-Z]+)\s+(\S+)(?:\s+HTTP/\d+(?:\.\d+)?)?$", first)
i = 1
if m:
method = (m.group(1) or "POST").strip().upper()
url = (m.group(2) or "").strip()
else:
i = 0 # no start line → treat as headers/body only
def _is_header_line(ln: str) -> bool:
if ":" not in ln:
return False
name = ln.split(":", 1)[0].strip()
# HTTP token: allow only letters/digits/hyphen. This prevents JSON lines like "contents": ... being treated as headers.
return bool(_re.fullmatch(r"[A-Za-z0-9\-]+", name))
# Read headers until a blank line OR until a non-header-looking line (start of body)
while i < len(lines):
ln = lines[i]
if ln.strip() == "":
i += 1
break
if not _is_header_line(ln):
# Assume this and the rest is body (e.g., starts with {, [, or a quoted key)
break
k, v = ln.split(":", 1)
headers[str(k).strip()] = str(v).strip()
i += 1
# Remainder is the body (can be JSON or any text)
body = "\\n".join(lines[i:]) if i < len(lines) else ""
except Exception:
pass
return method, url, headers, body
# Lookup original (untrimmed) body by req_id
orig: Optional[Dict[str, Any]] = None
try:
orig = _get_http_req(str(payload.get("req_id") or ""))
except Exception:
orig = None
# Pipeline meta (timeout) and pipeline id
try:
p = load_pipeline()
default_pid = p.get("id", "pipeline_editor")
timeout_sec = float(p.get("http_timeout_sec", 60) or 60)
except Exception:
default_pid = "pipeline_editor"
timeout_sec = 60.0
pid = str((orig or {}).get("pipeline_id") or default_pid)
# Build macro context from STORE (last snapshot)
try:
store = load_var_store(pid) or {}
except Exception:
store = {}
snapshot = store.get("snapshot") or {}
ctx: Dict[str, Any] = {}
try:
ctx.update({
"incoming": snapshot.get("incoming"),
"params": snapshot.get("params"),
"model": snapshot.get("model"),
"vendor_format": snapshot.get("vendor_format"),
"system": snapshot.get("system") or "",
})
except Exception:
pass
try:
ctx["OUT"] = snapshot.get("OUT") or {}
except Exception:
ctx["OUT"] = {}
try:
vmap = dict(store)
vmap.pop("snapshot", None)
ctx["vars"] = vmap
ctx["store"] = store
except Exception:
ctx["vars"] = {}
ctx["store"] = store or {}
# Extract overrides / edited request data
edited_text = payload.get("request_text") or ""
ov_method = payload.get("method")
ov_url = payload.get("url")
ov_headers = payload.get("headers") if isinstance(payload.get("headers"), dict) else None
ov_body_text = payload.get("body_text")
prefer_orig = bool(payload.get("prefer_registry_original", True))
# Parse HTTP text (safe)
m_parsed, u_parsed, h_parsed, b_parsed = parse_editable_http(edited_text)
# Compose method/url/headers
method = str(ov_method or m_parsed or (orig or {}).get("method") or "POST").upper()
url = str(ov_url or u_parsed or (orig or {}).get("url") or "")
# headers: start from original -> parsed -> explicit override
headers: Dict[str, Any] = {}
try:
if isinstance((orig or {}).get("headers"), dict):
headers.update(orig.get("headers") or {})
except Exception:
pass
try:
headers.update(h_parsed or {})
except Exception:
pass
try:
if isinstance(ov_headers, dict):
headers.update(ov_headers)
except Exception:
pass
# Render macros in URL and headers
try:
if url:
url = render_template_simple(str(url), ctx, ctx.get("OUT") or {})
except Exception:
pass
try:
rendered_headers: Dict[str, Any] = {}
for k, v in headers.items():
try:
rendered_headers[k] = render_template_simple(str(v), ctx, ctx.get("OUT") or {})
except Exception:
rendered_headers[k] = v
headers = rendered_headers
except Exception:
pass
# Normalize/dedupe headers (case-insensitive) and drop auto-calculated ones
headers = dedupe_headers(headers)
# Determine body (JSON vs text), preserving original untrimmed JSON
# Build orig_json (prefer registry; fallback parse from original body_text)
orig_json = (orig or {}).get("body_json") if isinstance(orig, dict) else None
if orig_json is None:
try:
ob = (orig or {}).get("body_text")
except Exception:
ob = None
if isinstance(ob, str):
try:
ob_norm = normalize_jsonish_text(ob)
except Exception:
ob_norm = ob
_oj = try_parse_json(ob_norm) or extract_json_trailing(ob_norm)
if _oj is not None:
orig_json = _oj
# Resolve body edits through macros
raw_edited_body_text = ov_body_text if ov_body_text is not None else b_parsed
try:
edited_body_text_resolved = render_template_simple(str(raw_edited_body_text or ""), ctx, ctx.get("OUT") or {})
except Exception:
edited_body_text_resolved = str(raw_edited_body_text or "")
# Compute final_json / final_text using helper (handles normalization, salvage, prefer_registry_original, content-type)
final_json, final_text = salvage_json_for_send(
edited_body_text_resolved,
headers,
orig_json,
prefer_orig
)
# Diagnostic: summarize merge decision without leaking payload
try:
def _summ(v):
try:
if v is None:
return {"t": "none"}
if isinstance(v, dict):
return {"t": "dict", "keys": len(v)}
if isinstance(v, list):
return {"t": "list", "len": len(v)}
if isinstance(v, str):
return {"t": "str", "len": len(v)}
return {"t": type(v).__name__}
except Exception:
return {"t": "err"}
norm_dbg = normalize_jsonish_text(edited_body_text_resolved)
edited_json_dbg = try_parse_json(norm_dbg) or extract_json_trailing(norm_dbg)
logger.info(
"%s",
json.dumps(
{
"event": "manual_send_merge_debug",
"req_id_original": str(payload.get("req_id") or ""),
"prefer_registry_original": prefer_orig,
"headers_content_type": ("json" if content_type_is_json(headers) else "other"),
"orig_json": _summ(orig_json),
"edited_json": _summ(edited_json_dbg),
"final": {
"json": _summ(final_json),
"text_len": (len(final_text) if isinstance(final_text, str) else None)
},
},
ensure_ascii=False,
),
)
except Exception:
pass
# Fresh req_id to avoid any overwrite of original log
import time as _time
rid = f"manual-{int(_time.time()*1000)}"
async def _publish(evt: Dict[str, Any]) -> None:
try:
await _trace_hub.publish(evt)
except Exception:
pass
# Prepare request body for logs (sanitized/trimmed for base64)
if final_json is not None:
try:
body_text_for_log = json.dumps(_san_b64(final_json, max_len=180), ensure_ascii=False, indent=2)
except Exception:
body_text_for_log = json.dumps(final_json, ensure_ascii=False)
else:
try:
body_text_for_log = _san_json_str(str(final_text or ""), max_len=180)
except Exception:
body_text_for_log = str(final_text or "")
# Register manual request in registry so subsequent "send" on this log has an original JSON source
try:
register_manual_request(rid, {
"pipeline_id": pid,
"node_id": "manual",
"node_type": "Manual",
"method": method,
"url": url,
"headers": dict(headers),
"body_json": (final_json if final_json is not None else None),
"body_text": (None if final_json is not None else str(final_text or "")),
})
except Exception:
pass
# Emit http_req SSE (Manual)
await _publish({
"event": "http_req",
"node_id": "manual",
"node_type": "Manual",
"provider": "manual",
"req_id": rid,
"method": method,
"url": url,
"headers": headers,
"body_text": body_text_for_log,
"ts": int(_time.time()*1000),
})
# Perform HTTP
async with build_client(timeout=timeout_sec) as client:
# Ensure JSON Content-Type when sending JSON
try:
if final_json is not None:
has_ct = any((str(k or "").lower() == "content-type") for k in headers.keys())
if not has_ct:
headers["Content-Type"] = "application/json"
except Exception:
pass
content = None
try:
if method in {"GET", "HEAD"}:
content = None
else:
if final_json is not None:
content = json.dumps(final_json, ensure_ascii=False).encode("utf-8")
else:
content = (final_text or "").encode("utf-8")
except Exception:
content = None
# Send
try:
resp = await client.request(method, url, headers=headers, content=content)
except Exception as e:
# Network/client error — emit http_resp with error text
await _publish({
"event": "http_resp",
"node_id": "manual",
"node_type": "Manual",
"provider": "manual",
"req_id": rid,
"status": 0,
"headers": {},
"body_text": str(e),
"ts": int(_time.time()*1000),
})
return JSONResponse({"ok": False, "error": str(e), "req_id": rid})
# Build response body for log (prefer JSON with trimmed base64)
try:
try:
obj = resp.json()
body_text_resp = json.dumps(_san_b64(obj, max_len=180), ensure_ascii=False, indent=2)
except Exception:
try:
t = await resp.aread()
body_text_resp = t.decode(getattr(resp, "encoding", "utf-8") or "utf-8", errors="replace")
except Exception:
try:
body_text_resp = resp.text
except Exception:
body_text_resp = "<resp.decode error>"
except Exception:
body_text_resp = "<resp.decode error>"
await _publish({
"event": "http_resp",
"node_id": "manual",
"node_type": "Manual",
"provider": "manual",
"req_id": rid,
"status": int(getattr(resp, "status_code", 0)),
"headers": dict(getattr(resp, "headers", {})),
"body_text": body_text_resp,
"ts": int(_time.time()*1000),
})
return JSONResponse({"ok": True, "req_id": rid})
# --- SSE endpoint for live pipeline trace ---
@app.get("/admin/trace/stream")
async def sse_trace() -> StreamingResponse:
loop = _asyncio.get_event_loop()

View File

@@ -6,14 +6,25 @@ import threading
# Simple in-process cancel flags storage (per pipeline_id)
# Thread-safe for FastAPI workers in same process
_cancel_flags: Dict[str, bool] = {}
# Mode of cancellation per pipeline: "graceful" (default) or "abort"
_cancel_modes: Dict[str, str] = {}
_lock = threading.Lock()
def request_cancel(pipeline_id: str) -> None:
"""Set cancel flag for given pipeline id."""
def request_cancel(pipeline_id: str, mode: str = "graceful") -> None:
"""Set cancel flag for given pipeline id with an optional mode.
mode:
- "graceful": do not interrupt in-flight operations, stop before next step
- "abort": attempt to cancel in-flight operations immediately
"""
pid = str(pipeline_id or "pipeline_editor")
m = str(mode or "graceful").lower().strip()
if m not in {"graceful", "abort"}:
m = "graceful"
with _lock:
_cancel_flags[pid] = True
_cancel_modes[pid] = m
def clear_cancel(pipeline_id: str) -> None:
@@ -21,10 +32,19 @@ def clear_cancel(pipeline_id: str) -> None:
pid = str(pipeline_id or "pipeline_editor")
with _lock:
_cancel_flags.pop(pid, None)
_cancel_modes.pop(pid, None)
def is_cancelled(pipeline_id: str) -> bool:
"""Check cancel flag for given pipeline id."""
pid = str(pipeline_id or "pipeline_editor")
with _lock:
return bool(_cancel_flags.get(pid, False))
def get_cancel_mode(pipeline_id: str) -> str:
"""Return current cancel mode for given pipeline id: 'graceful' or 'abort' (default graceful)."""
pid = str(pipeline_id or "pipeline_editor")
with _lock:
m = _cancel_modes.get(pid)
return m if m in {"graceful", "abort"} else "graceful"

View File

@@ -0,0 +1,415 @@
from __future__ import annotations
import json
import re
from typing import Any, Dict, Optional, Tuple
# Reuse executor's registry for original (untrimmed) requests
try:
from agentui.pipeline.executor import register_http_request as _reg_http_req # type: ignore
except Exception: # pragma: no cover
_reg_http_req = None # type: ignore
# -------- HTTP editable text parser (safe) --------
def parse_editable_http(s: str) -> Tuple[str, str, Dict[str, str], str]:
"""
Parse text pasted from Request area into (method, url, headers, body_text).
Stops header parsing when a line is not a valid HTTP header key (prevents treating JSON like '"contents": ...' as header).
"""
method, url = "POST", ""
headers: Dict[str, str] = {}
body = ""
try:
if not isinstance(s, str) or not s.strip():
return method, url, headers, body
txt = s.replace("\r\n", "\n")
lines = txt.split("\n")
if not lines:
return method, url, headers, body
first = (lines[0] or "").strip()
m = re.match(r"^([A-Z]+)\s+(\S+)(?:\s+HTTP/\d+(?:\.\d+)?)?$", first)
i = 1
if m:
method = (m.group(1) or "POST").strip().upper()
url = (m.group(2) or "").strip()
else:
i = 0 # no start-line -> treat as headers/body only
def _is_header_line(ln: str) -> bool:
if ":" not in ln:
return False
name = ln.split(":", 1)[0].strip()
# HTTP token: only letters/digits/hyphen. Prevents JSON keys like "contents": from being treated as headers.
return bool(re.fullmatch(r"[A-Za-z0-9\-]+", name))
# Read headers until blank line OR until line not looking like header (start of body)
while i < len(lines):
ln = lines[i]
if ln.strip() == "":
i += 1
break
if not _is_header_line(ln):
break
k, v = ln.split(":", 1)
headers[str(k).strip()] = str(v).strip()
i += 1
# Remainder is body (JSON or text)
body = "\n".join(lines[i:]) if i < len(lines) else ""
except Exception:
pass
return method, url, headers, body
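Behavior sketch for the parser above (the pasted request is made up):

from agentui.common.manual_http import parse_editable_http

raw = (
    "POST https://example.invalid/v1/messages HTTP/1.1\n"
    "Content-Type: application/json\n"
    "X-Api-Key: secret\n"
    "\n"
    '{"contents": [{"parts": [{"text": "hi"}]}]}'
)
method, url, headers, body = parse_editable_http(raw)
assert method == "POST" and url == "https://example.invalid/v1/messages"
assert headers == {"Content-Type": "application/json", "X-Api-Key": "secret"}
# A JSON line such as '"contents": ...' is never mistaken for a header, so the
# body is picked up correctly even when the blank separator line is missing.
assert body == '{"contents": [{"parts": [{"text": "hi"}]}]}'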
# -------- Headers helpers --------
def dedupe_headers(h: Dict[str, Any]) -> Dict[str, Any]:
"""
Case-insensitive dedupe; drop Host/Content-Length (httpx sets these itself).
Last value wins.
"""
try:
dedup: Dict[str, Tuple[str, Any]] = {}
for k, v in (h or {}).items():
lk = str(k).strip().lower()
if lk in {"host", "content-length"}:
continue
dedup[lk] = (k, v)
return {orig_k: val for (_, (orig_k, val)) in dedup.items()}
except Exception:
return dict(h or {})
def content_type_is_json(h: Dict[str, Any]) -> bool:
try:
return any(str(k).lower() == "content-type" and "json" in str(v).lower() for k, v in (h or {}).items())
except Exception:
return False
# -------- JSON parsing & normalization helpers --------
def try_parse_json(s: Any) -> Optional[Any]:
try:
if isinstance(s, (dict, list)):
return s
if isinstance(s, str) and s.strip():
return json.loads(s)
except Exception:
return None
return None
def normalize_jsonish_text(s: Any) -> str:
"""
Normalize JSON-looking text safely:
- If whole text is a quoted JSON string, decode via json.loads to inner string.
- Replace visible \\n/\\r/\\t outside JSON string literals with real control chars.
- Escape raw CR/LF/TAB inside JSON string literals as \\n/\\r/\\t to keep JSON valid.
"""
try:
txt = str(s if s is not None else "")
except Exception:
return ""
# If whole text looks like a quoted JSON string: decode to inner string
try:
if len(txt) >= 2 and txt[0] == '"' and txt[-1] == '"':
v = json.loads(txt)
if isinstance(v, str):
txt = v
except Exception:
pass
out_chars = []
i = 0
n = len(txt)
in_str = False
esc = False
while i < n:
ch = txt[i]
if in_str:
# escape raw control chars within JSON string literal
if ch == "\r":
# CRLF -> \n
if (i + 1) < n and txt[i + 1] == "\n":
out_chars.append("\\n")
i += 2
esc = False
continue
out_chars.append("\\r")
i += 1
esc = False
continue
if ch == "\n":
out_chars.append("\\n")
i += 1
esc = False
continue
if ch == "\t":
out_chars.append("\\t")
i += 1
esc = False
continue
out_chars.append(ch)
if esc:
esc = False
else:
if ch == "\\":
esc = True
elif ch == '"':
in_str = False
i += 1
continue
# not in string literal
if ch == '"':
in_str = True
out_chars.append(ch)
i += 1
continue
if ch == "\\" and (i + 1) < n:
nx = txt[i + 1]
if nx == "n":
out_chars.append("\n")
i += 2
continue
if nx == "r":
out_chars.append("\r")
i += 2
continue
if nx == "t":
out_chars.append("\t")
i += 2
continue
out_chars.append(ch)
i += 1
return "".join(out_chars)
def extract_json_trailing(s: str) -> Optional[Any]:
"""
Pull trailing JSON object/array from mixed text:
- Try whole text first
- Then scan from last '{' or '[' backward.
"""
try:
if not isinstance(s, str):
return None
txt = s.strip()
try:
return json.loads(txt)
except Exception:
pass
idx = txt.rfind("{")
while idx >= 0:
seg = txt[idx:]
try:
return json.loads(seg)
except Exception:
idx = txt.rfind("{", 0, idx)
idx = txt.rfind("[")
while idx >= 0:
seg = txt[idx:]
try:
return json.loads(seg)
except Exception:
idx = txt.rfind("[", 0, idx)
return None
except Exception:
return None
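Sketch of the trailing-JSON extraction on log-style text:

from agentui.common.manual_http import extract_json_trailing

mixed = 'REQUEST (preview)\nPOST /v1/x\n\n{"a": {"b": [1, 2]}}'
assert extract_json_trailing(mixed) == {"a": {"b": [1, 2]}}
assert extract_json_trailing("no json here") is None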
def global_unescape_jsonish(s: str) -> str:
"""
Last-resort: unicode_escape decode to convert \\n -> \n, \\" -> ", \\\\ -> \, \\uXXXX -> char, etc.
"""
try:
import codecs as _codecs
return _codecs.decode(s, "unicode_escape")
except Exception:
try:
return (
s.replace("\\n", "\n")
.replace("\\r", "\r")
.replace("\\t", "\t")
.replace('\\"', '"')
.replace("\\\\", "\\")
)
except Exception:
return s
def looks_jsonish(txt: Any) -> bool:
try:
s = str(txt or "")
if "{" in s or "[" in s:
return True
# also patterns like key:
return bool(re.search(r'\s["\']?[A-Za-z0-9_\-]+["\']?\s*:', s))
except Exception:
return False
def deep_merge_dicts(a: Any, b: Any) -> Any:
"""
Merge dicts (b over a, recursively). Lists or non-dicts are replaced by b.
"""
if isinstance(a, dict) and isinstance(b, dict):
out = dict(a)
for k, v in b.items():
if (k in a) and isinstance(a.get(k), dict) and isinstance(v, dict):
out[k] = deep_merge_dicts(a.get(k), v)
else:
out[k] = v
return out
return b
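Merge semantics in one example: nested dicts merge recursively (b wins per key), lists and scalars are replaced wholesale:

from agentui.common.manual_http import deep_merge_dicts

a = {"gen": {"temp": 0.2, "top_p": 0.9}, "stop": ["a", "b"]}
b = {"gen": {"temp": 0.7}, "stop": ["c"]}
assert deep_merge_dicts(a, b) == {"gen": {"temp": 0.7, "top_p": 0.9}, "stop": ["c"]}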
# ---- Trim-aware merge that preserves original binary/base64 fields ----
def is_trimmed_b64_string(s: Any) -> bool:
try:
if not isinstance(s, str):
return False
return "(trimmed " in s
except Exception:
return False
def looks_base64ish(s: Any) -> bool:
try:
if not isinstance(s, str) or len(s) < 64:
return False
return bool(re.fullmatch(r"[A-Za-z0-9+/=\r\n]+", s))
except Exception:
return False
def merge_lists_preserving_b64(orig_list: Any, edited_list: Any) -> Any:
"""
Merge lists with base64-trimmed preservation but DO NOT pad from original:
- Result length equals edited_list length (indices beyond edited are dropped).
- At each index:
* If edited value is a trimmed placeholder string and original has a string → keep original.
* If both dicts → recurse via deep_merge_preserving_b64.
* If both lists → recurse via merge_lists_preserving_b64.
* Else → take edited value as-is.
"""
if not isinstance(edited_list, list):
return edited_list
if not isinstance(orig_list, list):
orig_list = []
out = []
for i, ev in enumerate(edited_list):
ov = orig_list[i] if i < len(orig_list) else None
if isinstance(ev, str) and is_trimmed_b64_string(ev) and isinstance(ov, str):
out.append(ov)
elif isinstance(ev, dict) and isinstance(ov, dict):
out.append(deep_merge_preserving_b64(ov, ev))
elif isinstance(ev, list) and isinstance(ov, list):
out.append(merge_lists_preserving_b64(ov, ev))
else:
out.append(ev)
return out
def deep_merge_preserving_b64(orig: Any, edited: Any) -> Any:
"""
Merge preserving original base64/data_url only for trimmed placeholders, with strict edited-shape:
- If edited is a trimmed placeholder string and orig is a string → keep orig.
- Dicts: RESULT CONTAINS ONLY KEYS FROM EDITED. Keys missing in edited are treated as deleted.
For each present key: recurse (dict/list) or take edited value; for trimmed strings keep orig.
- Lists: delegate to merge_lists_preserving_b64 (result length = edited length).
- Other types: replace with edited.
"""
if isinstance(edited, str) and is_trimmed_b64_string(edited) and isinstance(orig, str):
return orig
if isinstance(orig, dict) and isinstance(edited, dict):
out: Dict[str, Any] = {}
for k, ev in edited.items():
ov = orig.get(k)
if isinstance(ev, str) and is_trimmed_b64_string(ev) and isinstance(ov, str):
out[k] = ov
elif isinstance(ev, dict) and isinstance(ov, dict):
out[k] = deep_merge_preserving_b64(ov, ev)
elif isinstance(ev, list) and isinstance(ov, list):
out[k] = merge_lists_preserving_b64(ov, ev)
else:
out[k] = ev
return out
if isinstance(orig, list) and isinstance(edited, list):
return merge_lists_preserving_b64(orig, edited)
return edited
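Sketch of the trim-aware merge: a "(trimmed ...)" placeholder in the edit is restored from the original, while keys omitted from the edit are dropped (the placeholder text is illustrative but satisfies is_trimmed_b64_string above):

from agentui.common.manual_http import deep_merge_preserving_b64

orig = {"image": {"data": "iVBORw0KGgo" + "A" * 500}, "prompt": "old", "extra": {"debug": True}}
edited = {"image": {"data": "iVBORw0KGgoAAAA... (trimmed 500 chars)"}, "prompt": "new"}
merged = deep_merge_preserving_b64(orig, edited)
assert merged["image"]["data"] == orig["image"]["data"]  # full base64 restored
assert merged["prompt"] == "new"
assert "extra" not in merged  # edited shape is authoritative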
def salvage_json_for_send(
edited_body_text: Any,
headers: Dict[str, Any],
orig_json: Optional[Any],
prefer_registry_original: bool = True,
) -> Tuple[Optional[Any], Optional[str]]:
"""
Build (final_json, final_text) for outgoing request body.
Strategy:
- Normalize text for JSON.
- Try parse; then try trailing extract; then unicode_escape unescape and retry.
- If prefer_registry_original=True and orig_json present:
* If edited_json present: deep-merge with base64 preservation, but ONLY keep keys present in edited;
lists are limited to the edited length (no padding from original).
* If not: DO NOT resurrect original. Empty/whitespace → send empty text; otherwise send raw text as-is.
- Else:
* If edited_json present => final_json = edited_json
* Else: if content-type is json and orig_json present => final_json = orig_json
else send raw text.
"""
# Normalize and attempt parse
norm = normalize_jsonish_text(edited_body_text)
edited_json = try_parse_json(norm)
if edited_json is None:
edited_json = extract_json_trailing(norm)
if edited_json is None:
ue = global_unescape_jsonish(str(edited_body_text or ""))
if isinstance(ue, str) and ue != edited_body_text:
ue_norm = normalize_jsonish_text(ue)
edited_json = try_parse_json(ue_norm) or extract_json_trailing(ue_norm)
json_ct = content_type_is_json(headers)
# Prefer original registry JSON where applicable
if prefer_registry_original and orig_json is not None:
if edited_json is None:
# Respect full manual control: do NOT resurrect original JSON.
# Empty/whitespace → send empty text; otherwise send raw text as-is.
if isinstance(norm, str) and not norm.strip():
return None, ""
else:
return None, str(edited_body_text or "")
else:
# Merge edits over original with trimmed-b64 preservation, but keep only keys present in edited
# and limit lists to the edited length.
return deep_merge_preserving_b64(orig_json, edited_json), None
# No prefer or no orig_json
if edited_json is not None:
return edited_json, None
if json_ct and orig_json is not None:
# Hard salvage for declared JSON payloads
maybe = try_parse_json(norm) or extract_json_trailing(norm)
return (maybe if maybe is not None else orig_json), None
# Plain text fallback
return None, str(edited_body_text or "")
# -------- Registry wrapper --------
def register_manual_request(req_id: str, info: Dict[str, Any]) -> None:
try:
if _reg_http_req:
_reg_http_req(req_id, info)
except Exception:
pass

View File

@@ -10,6 +10,7 @@ import hashlib
from collections import deque
from agentui.providers.http_client import build_client
from agentui.common.vendors import detect_vendor
from agentui.providers.adapters.registry import get_adapter, default_base_url_for as _adapter_default_base_url_for
from agentui.pipeline.templating import (
_OUT_MACRO_RE,
_VAR_MACRO_RE,
@@ -25,7 +26,45 @@ from agentui.pipeline.templating import (
eval_condition_expr,
)
from agentui.pipeline.storage import load_var_store, save_var_store, clear_var_store
from agentui.common.cancel import is_cancelled, clear_cancel
from agentui.common.cancel import is_cancelled, clear_cancel, get_cancel_mode
# HTTP request registry for manual resend feature (store original untrimmed bodies)
from collections import OrderedDict
_HTTP_REQ_REGISTRY_MAX = 200
_HTTP_REQ_REGISTRY = OrderedDict()
def register_http_request(req_id: str, info: Dict[str, Any]) -> None:
try:
rid = str(req_id or "")
if not rid:
return
# overwrite if exists to refresh order
if rid in _HTTP_REQ_REGISTRY:
try:
_HTTP_REQ_REGISTRY.pop(rid, None)
except Exception:
pass
_HTTP_REQ_REGISTRY[rid] = info or {}
# trim oldest beyond cap
while len(_HTTP_REQ_REGISTRY) > _HTTP_REQ_REGISTRY_MAX:
try:
_HTTP_REQ_REGISTRY.popitem(last=False)
except Exception:
break
except Exception:
pass
def get_http_request(req_id: str) -> Optional[Dict[str, Any]]:
try:
rid = str(req_id or "")
if not rid:
return None
v = _HTTP_REQ_REGISTRY.get(rid)
# return shallow copy to avoid mutation
return dict(v) if isinstance(v, dict) else None
except Exception:
return None
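Registry behavior sketch (assumes the agentui package and its dependencies are importable): get_http_request hands back a shallow copy, and the oldest entries are evicted past _HTTP_REQ_REGISTRY_MAX:

from agentui.pipeline.executor import register_http_request, get_http_request

register_http_request("req-1", {"url": "https://example.invalid", "body_json": {"a": 1}})
info = get_http_request("req-1")
info["url"] = "mutated"  # mutating the copy does not touch the registry
assert get_http_request("req-1")["url"] == "https://example.invalid"
for i in range(250):  # exceed the 200-entry cap
    register_http_request(f"req-{i}", {"n": i})
assert get_http_request("req-0") is None          # evicted as oldest
assert get_http_request("req-249") == {"n": 249}  # newest survives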
# --- Global helpers: robust auto-decompression for logging/JSON parsing ----------
import gzip
@@ -115,6 +154,54 @@ def _safe_response_json(resp) -> Any:
return {"error": "Failed to decode JSON from upstream", "text": t}
# --- Cooperative cancel/abort helper for in-flight awaits (HTTP, etc.) ---------
async def _await_coro_with_cancel(coro: Awaitable[Any], pipeline_id: str, poll_interval: float = 0.1) -> Any:
"""
Await 'coro' while polling manual cancel flag for the pipeline.
Behavior:
- If cancel mode is 'graceful' -> do NOT interrupt 'coro'; we just continue waiting
and upper layers will stop before scheduling new work.
- If cancel mode is 'abort' -> cancel in-flight task immediately and raise ExecutionError.
Returns the result of 'coro' or raises ExecutionError on abort.
"""
task = asyncio.create_task(coro)
while True:
try:
# Shield from timeout cancellation; we only use timeout to poll flags.
return await asyncio.wait_for(asyncio.shield(task), timeout=poll_interval)
except asyncio.TimeoutError:
# Not done yet — fall through to polling
pass
except asyncio.CancelledError:
# Map in-flight cancellation to ExecutionError on abort; otherwise re-raise
try:
mode = get_cancel_mode(pipeline_id)
except Exception:
mode = "abort"
if mode == "abort":
raise ExecutionError("Cancelled by user (abort)")
raise
# Poll cancel flag
try:
if is_cancelled(pipeline_id):
mode = get_cancel_mode(pipeline_id)
if mode == "abort":
try:
if not task.done():
task.cancel()
await task
except asyncio.CancelledError:
raise ExecutionError("Cancelled by user (abort)")
except Exception as exc: # noqa: BLE001
raise ExecutionError(f"Cancelled by user (abort): {exc}")
# graceful: do not interrupt; keep waiting
except ExecutionError:
# Abort must propagate; only genuine polling failures are swallowed
raise
except Exception:
# Be defensive: ignore polling failures and keep waiting
pass
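A usage sketch for the helper above (assumes ExecutionError and the cancel flags are importable; uses a throwaway pipeline id):

import asyncio
from agentui.common.cancel import request_cancel, clear_cancel
from agentui.pipeline.executor import _await_coro_with_cancel, ExecutionError

async def main():
    clear_cancel("demo")
    # No flag set: the awaited coroutine completes normally.
    assert await _await_coro_with_cancel(asyncio.sleep(0.05, result=42), "demo") == 42
    # Abort flag set: the in-flight await is cancelled and surfaces as ExecutionError.
    request_cancel("demo", mode="abort")
    try:
        await _await_coro_with_cancel(asyncio.sleep(10), "demo")
    except ExecutionError as e:
        print(e)  # Cancelled by user (abort)
    finally:
        clear_cancel("demo")

asyncio.run(main())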
# --- Helpers: sanitize base64/data URLs in JSON for logging (Burp-like) ----------
def _is_b64ish_string(s: str) -> bool:
try:
@@ -442,7 +529,14 @@ class PipelineExecutor:
# Execution
try:
out = await node.run(inputs, ctx)
except Exception as exc:
except BaseException as exc:
err = exc
try:
import asyncio as _asyncio
if isinstance(exc, _asyncio.CancelledError):
err = ExecutionError("Cancelled by user (abort)")
except Exception:
pass
if trace is not None:
try:
await trace({
@@ -451,11 +545,11 @@ class PipelineExecutor:
"node_type": node.type_name,
"wave": wave_num,
"ts": int(time.time() * 1000),
"error": str(exc),
"error": str(err),
})
except Exception:
pass
raise
raise err
else:
dur_ms = int((time.perf_counter() - started) * 1000)
if trace is not None:
@@ -2053,15 +2147,26 @@ class ProviderCallNode(Node):
# Default endpoints if not set
if not endpoint_tmpl:
if provider == "openai":
endpoint_tmpl = "/v1/chat/completions"
elif provider == "gemini":
endpoint_tmpl = "/v1beta/models/{{ model }}:generateContent"
elif provider == "gemini_image":
# Dedicated provider for Gemini image generation/preview (generateContent)
endpoint_tmpl = "/v1beta/models/{{ model }}:generateContent"
elif provider == "claude":
endpoint_tmpl = "/v1/messages"
_ad = None
try:
_ad = get_adapter(provider)
except Exception:
_ad = None
if _ad is not None:
try:
endpoint_tmpl = _ad.default_endpoint(str(context.get("model") or ""))
except Exception:
endpoint_tmpl = ""
if not endpoint_tmpl:
if provider == "openai":
endpoint_tmpl = "/v1/chat/completions"
elif provider == "gemini":
endpoint_tmpl = "/v1beta/models/{{ model }}:generateContent"
elif provider == "gemini_image":
# Dedicated provider for Gemini image generation/preview (generateContent)
endpoint_tmpl = "/v1beta/models/{{ model }}:generateContent"
elif provider == "claude":
endpoint_tmpl = "/v1/messages"
# Default template for gemini_image if none provided (inject [[PROMPT]])
try:
@@ -2074,11 +2179,259 @@ class ProviderCallNode(Node):
# Prepare Prompt Blocks + the pm structure for the template
unified_msgs = self._render_blocks_to_unified(context)
pm_struct = self._blocks_struct_for_template(provider, unified_msgs, context)
adapter = None
try:
adapter = get_adapter(provider)
except Exception:
adapter = None
if adapter:
blocks_struct = adapter.blocks_struct_for_template(unified_msgs, context, self.config or {})
else:
blocks_struct = self._blocks_struct_for_template(provider, unified_msgs, context)
pm_struct = dict(blocks_struct)
# Extend the context for template rendering
render_ctx = dict(context)
render_ctx["pm"] = pm_struct
# Pass the node config into the context for adapters (e.g., claude_no_system)
try:
render_ctx["_node_config"] = dict(self.config or {})
except Exception:
render_ctx["_node_config"] = {}
# Node-local: track VAR paths overridden by prompt_preprocess (no extra syntax)
pre_var_paths = set()
# prompt_preprocess (pre-merge DSL): parse its lines before prompt_combine and prepare "pre-segments"
# Line syntax:
# SEGMENT [delKeyContains "needle"] [delpos=prepend|append|N|-1] [pruneEmpty] [case=ci|cs]
# Defaults: case=ci, pruneEmpty=false; no delpos means append
# SEGMENT supports [[...]] and {{ ... }}
pre_segments_raw: List[Dict[str, Any]] = []
try:
pre_raw = str((self.config or {}).get("prompt_preprocess") or "").strip()
except Exception:
pre_raw = ""
if pre_raw:
lines = [ln.strip() for ln in pre_raw.splitlines() if str(ln or "").strip()]
import re as _repp
# Collect local overrides for plain [[VAR:path]] segments after filtering
pre_var_overrides: Dict[str, Any] = {}
def _pp_try_json(s: str) -> Any:
try:
obj = json.loads(s)
except Exception:
try:
obj = json.loads(s, strict=False) # type: ignore[call-arg]
except Exception:
return None
for _ in range(2):
if isinstance(obj, str):
st = obj.strip()
if (st.startswith("{") and st.endswith("}")) or (st.startswith("[") and st.endswith("]")):
try:
obj = json.loads(st)
continue
except Exception:
break
break
return obj
def _norm(s: Any, ci: bool) -> str:
try:
ss = s if isinstance(s, str) else str(s)
except Exception:
ss = ""
return ss.lower() if ci else ss
def _delkeys_by_val_contains(x: Any, needles: List[str], ci: bool, prune_empty: bool, is_root: bool = False) -> tuple[Any, int]:
"""
Recursively delete dict keys whose value's string representation contains a needle.
- needles: list of substrings
- ci: case-insensitive matching (default True)
- prune_empty: drop empty {} / [] from parent containers (except the root)
Returns (new_value_or_None, total_keys_removed)
"""
removed = 0
if isinstance(x, dict):
out: Dict[str, Any] = {}
for k, v in x.items():
v2, rem2 = _delkeys_by_val_contains(v, needles, ci, prune_empty, False)
removed += rem2
sv = _stringify_for_template(v2)
cond = False
try:
nsv = _norm(sv, ci)
for nd in needles:
nds = _norm(nd, ci)
if nds and (nds in nsv):
cond = True
break
except Exception:
cond = False
if cond:
removed += 1
continue
out[k] = v2
if prune_empty and not is_root and len(out) == 0:
return None, removed # type: ignore[return-value]
return out, removed
if isinstance(x, list):
arr: List[Any] = []
for it in x:
it2, rem2 = _delkeys_by_val_contains(it, needles, ci, prune_empty, False)
removed += rem2
if it2 is None and prune_empty:
continue
arr.append(it2)
if prune_empty and not is_root and len(arr) == 0:
return None, removed # type: ignore[return-value]
return arr, removed
# scalar
return x, removed
pre_used = 0
pre_removed_total = 0
for ln in lines:
try:
# Options
needles = [m.group(1) for m in _repp.finditer(r'(?is)\bdelKeyContains\s+"([^"]*)"', ln)]
mpos = _repp.search(r'(?is)\bdelpos\s*=\s*(prepend|append|-?\d+)\b', ln)
pos_spec = mpos.group(1).strip().lower() if mpos else None
mcase = _repp.search(r'(?is)\bcase\s*=\s*(ci|cs)\b', ln)
ci = True if not mcase else (mcase.group(1).strip().lower() == "ci")
prune = bool(_repp.search(r'(?is)\bpruneEmpty\b', ln))
# Strip the directives from the line text; what remains is the SEGMENT itself
s2 = _repp.sub(r'(?is)\bdelKeyContains\s+"[^"]*"', "", ln)
s2 = _repp.sub(r'(?is)\bdelpos\s*=\s*(prepend|append|-?\d+)\b', "", s2)
s2 = _repp.sub(r'(?is)\bcase\s*=\s*(ci|cs)\b', "", s2)
s2 = _repp.sub(r'(?is)\bpruneEmpty\b', "", s2)
seg = s2.strip()
if not seg:
continue
# Try to detect plain [[VAR:path]] to support node-local override without extra syntax
var_path = None
try:
mvar = _VAR_MACRO_RE.fullmatch(seg)
if mvar:
var_path = (mvar.group(1) or "").strip()
except Exception:
var_path = None
# Resolve macros and attempt to parse JSON
resolved = render_template_simple(seg, render_ctx, render_ctx.get("OUT") or {})
obj = _pp_try_json(resolved)
base = obj if obj is not None else resolved
# Delete keys by substring match, if requested
if needles:
try:
base2, remcnt = _delkeys_by_val_contains(base, needles, ci, prune, True)
except Exception:
base2, remcnt = base, 0
else:
base2, remcnt = base, 0
# If the segment was a pure [[VAR:path]] and we had filters,
# locally override this VAR for the rest of the node (so prompt_combine sees the filtered value)
try:
if var_path and needles:
pre_var_overrides[var_path] = base2
except Exception:
pass
pre_segments_raw.append({"obj": base2, "pos": pos_spec})
pre_used += 1
pre_removed_total += int(remcnt or 0)
except Exception:
continue
# Apply local VAR overrides onto render_ctx (node-local, no extra syntax)
try:
import copy as _copy
def _safe_deepcopy(x: Any) -> Any:
try:
return _copy.deepcopy(x)
except Exception:
# Fallback deep copy for dict/list; otherwise return as-is
try:
if isinstance(x, dict):
return {k: _safe_deepcopy(v) for k, v in x.items()}
if isinstance(x, list):
return [_safe_deepcopy(i) for i in x]
except Exception:
pass
try:
return json.loads(json.dumps(x))
except Exception:
return x
def _set_by_path(obj: Any, path: str, value: Any) -> None:
cur = obj
parts = [p.strip() for p in str(path).split(".") if p.strip()]
for i, part in enumerate(parts):
# list index?
idx = None
try:
idx = int(part)
except Exception:
idx = None
last = (i == len(parts) - 1)
if idx is not None:
if not isinstance(cur, list) or idx < 0 or idx >= len(cur):
return
if last:
cur[idx] = value
return
cur = cur[idx]
continue
# dict key
if not isinstance(cur, dict):
return
if last:
cur[part] = value
return
if part not in cur or not isinstance(cur[part], (dict, list)):
cur[part] = {}
cur = cur[part]
# Deep-copy only the top-level roots we are going to mutate (e.g., 'incoming' for 'incoming.*')
roots_to_copy: set = set()
for _p in (pre_var_overrides or {}).keys():
if "." in str(_p):
roots_to_copy.add(str(_p).split(".", 1)[0].strip())
for _root in roots_to_copy:
try:
if _root in render_ctx:
render_ctx[_root] = _safe_deepcopy(render_ctx[_root])
except Exception:
pass
for _p, _v in (pre_var_overrides or {}).items():
_set_by_path(render_ctx, _p, _v)
pre_var_paths = set(pre_var_overrides.keys())
except Exception:
pre_var_paths = set()
# SSE: prompt_preprocess summary
try:
trace_fn = context.get("_trace")
if trace_fn:
await trace_fn({
"event": "prompt_preprocess",
"node_id": self.node_id,
"node_type": self.type_name,
"provider": provider,
"lines": len(lines),
"used": pre_used,
"removed_keys": pre_removed_total,
"ts": int(time.time() * 1000),
})
except Exception:
pass
else:
pre_segments_raw = []
# prompt_combine (DSL "&"): комбинируем сегменты в заданном порядке.
# Расширения:
@@ -2090,7 +2443,35 @@ class ProviderCallNode(Node):
combine_raw = str(cfg.get("prompt_combine") or "").strip()
except Exception:
combine_raw = ""
if combine_raw:
# Fast path: if an adapter is available, merge the segments through it and short-circuit the legacy block below
raw_segs_for_adapter = [s.strip() for s in combine_raw.split("&") if str(s or "").strip()]
if adapter and (raw_segs_for_adapter or (pre_segments_raw and len(pre_segments_raw) > 0)):
try:
pm_struct = adapter.combine_segments(
blocks_struct=blocks_struct,
pre_segments_raw=pre_segments_raw,
raw_segs=raw_segs_for_adapter,
render_ctx=render_ctx,
pre_var_paths=pre_var_paths,
render_template_simple_fn=render_template_simple,
var_macro_fullmatch_re=_VAR_MACRO_RE,
detect_vendor_fn=detect_vendor,
)
# refresh pm in the context
try:
render_ctx["pm"] = pm_struct
except Exception:
pass
except Exception as _e:
try:
print(f"TRACE adapter_combine_error: node={self.node_id} provider={provider} err={_e}")
except Exception:
pass
else:
# short-circuit the legacy prompt_combine block below
pre_segments_raw = []
combine_raw = ""
if combine_raw or (pre_segments_raw and len(pre_segments_raw) > 0):
raw_segs = [s.strip() for s in combine_raw.split("&") if str(s or "").strip()]
def _try_json(s: str) -> Any:
@@ -2547,12 +2928,36 @@ class ProviderCallNode(Node):
if provider in {"gemini", "gemini_image"}:
built: List[Dict[str, Any]] = []
sys_texts: List[str] = []
# Preprocess-inserted segments (prompt_preprocess)
for _pre in pre_segments_raw:
try:
_obj = _pre.get("obj")
items = _as_gemini_contents(_obj)
items = _filter_gemini(items)
built = _insert_items(built, items, _pre.get("pos"))
try:
sx = _extract_sys_text_from_obj(_obj)
if isinstance(sx, str) and sx.strip():
sys_texts.append(sx.strip())
except Exception:
pass
except Exception:
pass
for raw_seg in raw_segs:
body_seg, pos_spec = _split_pos_spec(raw_seg)
if body_seg == "[[PROMPT]]":
items = _filter_gemini(list(blocks_struct.get("contents", []) or []))
built = _insert_items(built, items, pos_spec)
continue
m_pre = _VAR_MACRO_RE.fullmatch(body_seg)
if m_pre:
_p = (m_pre.group(1) or "").strip()
try:
if _p in pre_var_paths:
# Skip duplicate var segment - already inserted via prompt_preprocess (filtered)
continue
except Exception:
pass
resolved = render_template_simple(body_seg, render_ctx, render_ctx.get("OUT") or {})
obj = _try_json(resolved)
# provider guess + system extract for cross-provider combine
@@ -2585,12 +2990,36 @@ class ProviderCallNode(Node):
elif provider in {"openai"}:
built2: List[Dict[str, Any]] = []
sys_texts: List[str] = []
# Preprocess-inserted segments (prompt_preprocess)
for _pre in pre_segments_raw:
try:
_obj = _pre.get("obj")
items = _as_openai_messages(_obj)
items = _filter_openai(items)
built2 = _insert_items(built2, items, _pre.get("pos"))
try:
sx = _extract_sys_text_from_obj(_obj)
if isinstance(sx, str) and sx.strip():
sys_texts.append(sx.strip())
except Exception:
pass
except Exception:
pass
for raw_seg in raw_segs:
body_seg, pos_spec = _split_pos_spec(raw_seg)
if body_seg == "[[PROMPT]]":
items = _filter_openai(list(blocks_struct.get("messages", []) or []))
built2 = _insert_items(built2, items, pos_spec)
continue
m_pre = _VAR_MACRO_RE.fullmatch(body_seg)
if m_pre:
_p = (m_pre.group(1) or "").strip()
try:
if _p in pre_var_paths:
# Skip duplicate var segment - already inserted via prompt_preprocess (filtered)
continue
except Exception:
pass
resolved = render_template_simple(body_seg, render_ctx, render_ctx.get("OUT") or {})
obj = _try_json(resolved)
try:
@@ -2622,12 +3051,36 @@ class ProviderCallNode(Node):
else: # claude
built3: List[Dict[str, Any]] = []
sys_texts: List[str] = []
# Preprocess-inserted segments (prompt_preprocess)
for _pre in pre_segments_raw:
try:
_obj = _pre.get("obj")
items = _as_claude_messages(_obj)
items = _filter_claude(items)
built3 = _insert_items(built3, items, _pre.get("pos"))
try:
sx = _extract_sys_text_from_obj(_obj)
if isinstance(sx, str) and sx.strip():
sys_texts.append(sx.strip())
except Exception:
pass
except Exception:
pass
for raw_seg in raw_segs:
body_seg, pos_spec = _split_pos_spec(raw_seg)
if body_seg == "[[PROMPT]]":
items = _filter_claude(list(blocks_struct.get("messages", []) or []))
built3 = _insert_items(built3, items, pos_spec)
continue
m_pre = _VAR_MACRO_RE.fullmatch(body_seg)
if m_pre:
_p = (m_pre.group(1) or "").strip()
try:
if _p in pre_var_paths:
# Skip duplicate var segment - already inserted via prompt_preprocess (filtered)
continue
except Exception:
pass
resolved = render_template_simple(body_seg, render_ctx, render_ctx.get("OUT") or {})
obj = _try_json(resolved)
try:
@@ -2652,6 +3105,9 @@ class ProviderCallNode(Node):
if isinstance(existing_sys, list):
sys_blocks.extend(existing_sys)
st0 = blocks_struct.get("system_text") or ""
# Ensure PROMPT system_text from blocks is included as a Claude system block
if isinstance(st0, str) and st0.strip():
sys_blocks.append({"type": "text", "text": st0})
for s in sys_texts:
sys_blocks.append({"type": "text", "text": s})
st = "\n\n".join([t for t in [st0] + sys_texts if isinstance(t, str) and t.strip()])
@@ -2671,8 +3127,11 @@ class ProviderCallNode(Node):
else:
# Prefer top-level system as plain string (proxy compatibility)
pm_struct = {"messages": built3, "system_text": st}
if st:
pm_struct["system"] = st
# Prefer array of system blocks when possible; fallback to single text block
if sys_blocks:
pm_struct["system"] = sys_blocks
elif st:
pm_struct["system"] = [{"type": "text", "text": st}]
# SSE metric
try:
@@ -2695,49 +3154,41 @@ class ProviderCallNode(Node):
# Unified PROMPT JSON fragment for templates: [[PROMPT]]
prompt_fragment = ""
try:
if provider == "openai":
prompt_fragment = '"messages": ' + json.dumps(pm_struct.get("messages", []), ensure_ascii=False)
elif provider == "gemini":
parts = []
contents = pm_struct.get("contents")
if contents is not None:
parts.append('"contents": ' + json.dumps(contents, ensure_ascii=False))
sysi = pm_struct.get("systemInstruction")
# even an empty {} object is valid
if sysi is not None:
parts.append('"systemInstruction": ' + json.dumps(sysi, ensure_ascii=False))
prompt_fragment = ", ".join(parts)
elif provider == "gemini_image":
# Use the same PROMPT structure as for Gemini (generateContent)
parts = []
contents = pm_struct.get("contents")
if contents is not None:
parts.append('"contents": ' + json.dumps(contents, ensure_ascii=False))
sysi = pm_struct.get("systemInstruction")
if sysi is not None:
parts.append('"systemInstruction": ' + json.dumps(sysi, ensure_ascii=False))
prompt_fragment = ", ".join(parts)
elif provider == "claude":
parts = []
# Honor the compatibility flag: with claude_no_system, do not add a top-level "system"
claude_no_system = False
try:
claude_no_system = bool((self.config or {}).get("claude_no_system", False))
except Exception:
if adapter:
prompt_fragment = adapter.prompt_fragment(pm_struct, self.config or {})
else:
if provider == "openai":
prompt_fragment = '"messages": ' + json.dumps(pm_struct.get("messages", []), ensure_ascii=False)
elif provider in {"gemini", "gemini_image"}:
parts = []
contents = pm_struct.get("contents")
if contents is not None:
parts.append('"contents": ' + json.dumps(contents, ensure_ascii=False))
sysi = pm_struct.get("systemInstruction")
if sysi is not None:
parts.append('"systemInstruction": ' + json.dumps(sysi, ensure_ascii=False))
prompt_fragment = ", ".join(parts)
elif provider == "claude":
parts = []
# Honor the compatibility flag: with claude_no_system, do not add a top-level "system"
claude_no_system = False
try:
claude_no_system = bool((self.config or {}).get("claude_no_system", False))
except Exception:
claude_no_system = False
if not claude_no_system:
# Prefer the array of system blocks if present; otherwise the string system_text
sys_val = pm_struct.get("system", None)
if sys_val is None:
sys_val = pm_struct.get("system_text")
if sys_val:
parts.append('"system": ' + json.dumps(sys_val, ensure_ascii=False))
if not claude_no_system:
# Prefer the array of system blocks if present; otherwise the string system_text
sys_val = pm_struct.get("system", None)
if sys_val is None:
sys_val = pm_struct.get("system_text")
if sys_val:
parts.append('"system": ' + json.dumps(sys_val, ensure_ascii=False))
msgs = pm_struct.get("messages")
if msgs is not None:
parts.append('"messages": ' + json.dumps(msgs, ensure_ascii=False))
prompt_fragment = ", ".join(parts)
msgs = pm_struct.get("messages")
if msgs is not None:
parts.append('"messages": ' + json.dumps(msgs, ensure_ascii=False))
prompt_fragment = ", ".join(parts)
except Exception: # noqa: BLE001
prompt_fragment = ""
render_ctx["PROMPT"] = prompt_fragment
@@ -2809,6 +3260,24 @@ class ProviderCallNode(Node):
pass
# SSE: http_req (Burp-style)
req_id = f"{self.node_id}-{int(time.time()*1000)}"
# Register original (untrimmed) request details for manual resend
try:
pipeline_id = str((context.get("meta") or {}).get("id", "pipeline_editor"))
except Exception:
pipeline_id = "pipeline_editor"
try:
register_http_request(req_id, {
"pipeline_id": pipeline_id,
"node_id": self.node_id,
"node_type": self.type_name,
"provider": provider,
"method": "POST",
"url": url,
"headers": dict(final_headers),
"body_json": payload, # untrimmed original payload
})
except Exception:
pass
try:
trace_fn = context.get("_trace")
if trace_fn:
@@ -2836,7 +3305,26 @@ class ProviderCallNode(Node):
st: Optional[int] = None
async with build_client(timeout=timeout_sec) as client:
body_bytes = json.dumps(payload, ensure_ascii=False).encode("utf-8")
resp = await client.post(url, content=body_bytes, headers=final_headers)
# Cooperative cancel pre-check (avoid starting new HTTP on abort)
try:
pipeline_id = str((context.get("meta") or {}).get("id", "pipeline_editor"))
except Exception:
pipeline_id = "pipeline_editor"
try:
if is_cancelled(pipeline_id) and get_cancel_mode(pipeline_id) == "abort":
try:
print(f"TRACE http_cancel_pre: {self.node_id} abort before request")
except Exception:
pass
raise ExecutionError("Cancelled by user (abort)")
except ExecutionError:
raise
except Exception:
pass
async def _do_post():
return await client.post(url, content=body_bytes, headers=final_headers)
# Await HTTP with cooperative cancel/abort handling
resp = await _await_coro_with_cancel(_do_post(), pipeline_id)
# Do not raise_for_status: keep body/logs on 4xx/5xx
try:
print("===== ProviderCall RESPONSE BEGIN =====")
@@ -3166,13 +3654,19 @@ class RawForwardNode(Node):
# Auto-detect the vendor for the base URL when base_url is not set
if not base_url:
vendor = detect_vendor(raw_payload)
if vendor == "openai":
base_url = "https://api.openai.com"
elif vendor == "claude":
base_url = "https://api.anthropic.com"
elif vendor == "gemini":
base_url = "https://generativelanguage.googleapis.com"
else:
base_url = None
try:
base_url = _adapter_default_base_url_for(vendor)
except Exception:
base_url = None
if not base_url:
if vendor == "openai":
base_url = "https://api.openai.com"
elif vendor == "claude":
base_url = "https://api.anthropic.com"
elif vendor == "gemini":
base_url = "https://generativelanguage.googleapis.com"
if not base_url:
raise ExecutionError(
f"Node {self.node_id} ({self.type_name}): 'base_url' is not configured and vendor could not be detected."
)
@@ -3288,6 +3782,31 @@ class RawForwardNode(Node):
# SSE: http_req
req_id = f"{self.node_id}-{int(time.time()*1000)}"
# Register original (untrimmed) request details for manual resend
try:
pipeline_id = str((context.get("meta") or {}).get("id", "pipeline_editor"))
except Exception:
pipeline_id = "pipeline_editor"
try:
reg_info: Dict[str, Any] = {
"pipeline_id": pipeline_id,
"node_id": self.node_id,
"node_type": self.type_name,
"method": method,
"url": url,
"headers": dict(headers),
}
if raw_payload is not None:
reg_info["body_json"] = raw_payload # original JSON body
# Always keep original textual/bytes preview for non-JSON
reg_info["body_text"] = body_text
try:
reg_info["body_bytes_len"] = int(len(body_bytes or b""))
except Exception:
pass
register_http_request(req_id, reg_info)
except Exception:
pass
try:
trace_fn = context.get("_trace")
if trace_fn:
@@ -3309,7 +3828,26 @@ class RawForwardNode(Node):
async with build_client(timeout=timeout_sec) as client:
# For GET/HEAD we normally do not send a body
send_content = None if method in {"GET", "HEAD"} else body_bytes
resp = await client.request(method, url, headers=headers, content=send_content)
# Cooperative cancel pre-check (avoid starting new HTTP on abort)
try:
pipeline_id = str((context.get("meta") or {}).get("id", "pipeline_editor"))
except Exception:
pipeline_id = "pipeline_editor"
try:
if is_cancelled(pipeline_id) and get_cancel_mode(pipeline_id) == "abort":
try:
print(f"TRACE http_cancel_pre: {self.node_id} abort before request")
except Exception:
pass
raise ExecutionError("Cancelled by user (abort)")
except ExecutionError:
raise
except Exception:
pass
async def _do_req():
return await client.request(method, url, headers=headers, content=send_content)
# Await HTTP with cooperative cancel/abort handling
resp = await _await_coro_with_cancel(_do_req(), pipeline_id)
# Response: log/print
try:
@@ -3628,6 +4166,22 @@ async def _providercall_run_with_while(self, inputs, context):
last_idx = -1
for i in range(max_iters):
# Cancel check before starting next iteration
try:
pid = str((context.get("meta") or {}).get("id", "pipeline_editor"))
except Exception:
pid = "pipeline_editor"
try:
if is_cancelled(pid):
mode = get_cancel_mode(pid)
try:
print(f"TRACE while_cancel: {self.node_id} mode={mode} at i={i} (pre)")
except Exception:
pass
break
except Exception:
pass
# Build loop-local context with cycleindex and WAS_ERROR
ctx2 = dict(context or {})
try:
@@ -3651,6 +4205,13 @@ async def _providercall_run_with_while(self, inputs, context):
if i == 0:
cond = True
else:
# Inject previous iteration error flag so [[WAS_ERROR]] in while_expr refers to the last iteration
try:
vmap2 = dict(ctx2.get("vars") or {})
except Exception:
vmap2 = {}
vmap2["WAS_ERROR"] = bool(last_was_error)
ctx2["vars"] = vmap2
# Augment OUT with the last output of this node so [[OUTn]] / [[OUT:nX...]] can see it
try:
out_aug = dict(out_map or {})
@@ -3695,7 +4256,7 @@ async def _providercall_run_with_while(self, inputs, context):
# Single iteration run; reenter node.run with _in_while flag set
try:
inner_out = await self.run(inputs, ctx2)
except Exception as exc: # network or other runtime error
except BaseException as exc:
if ignore:
inner_out = {"result": {"error": str(exc)}, "response_text": ""}
try:
@@ -3769,6 +4330,22 @@ async def _rawforward_run_with_while(self, inputs, context):
last_idx = -1
for i in range(max_iters):
# Cancel check before starting next iteration
try:
pid = str((context.get("meta") or {}).get("id", "pipeline_editor"))
except Exception:
pid = "pipeline_editor"
try:
if is_cancelled(pid):
mode = get_cancel_mode(pid)
try:
print(f"TRACE while_cancel: {self.node_id} mode={mode} at i={i} (pre)")
except Exception:
pass
break
except Exception:
pass
ctx2 = dict(context or {})
try:
vmap = dict(ctx2.get("vars") or {})
@@ -3787,6 +4364,13 @@ async def _rawforward_run_with_while(self, inputs, context):
if i == 0:
cond = True
else:
# Inject previous iteration error flag so [[WAS_ERROR]] in while_expr refers to the last iteration
try:
vmap2 = dict(ctx2.get("vars") or {})
except Exception:
vmap2 = {}
vmap2["WAS_ERROR"] = bool(last_was_error)
ctx2["vars"] = vmap2
try:
out_aug = dict(out_map or {})
out_aug[self.node_id] = dict(last_out or {})
@@ -3829,7 +4413,7 @@ async def _rawforward_run_with_while(self, inputs, context):
try:
inner_out = await self.run(inputs, ctx2)
except Exception as exc:
except BaseException as exc:
if ignore:
inner_out = {"result": {"error": str(exc)}, "response_text": ""}
try:

View File

@@ -594,6 +594,12 @@ def _tokenize_condition_expr(expr: str, context: Dict[str, Any], out_map: Dict[s
while j < n and (expr[j].isalnum() or expr[j] in "._"):
j += 1
word = expr[i:j]
lw = word.lower()
# Literals: true/false/null (any letter case) become Python constants
if re.fullmatch(r"[A-Za-z_][A-Za-z0-9_]*", word) and lw in {"true", "false", "null"}:
tokens.append("True" if lw == "true" else ("False" if lw == "false" else "None"))
i = j
continue
# Support "bare" identifiers from vars: cycleindex, WAS_ERROR, etc.
# If it is a plain identifier (no dots) and present in context.vars, bind it to that value.
try:
@@ -752,17 +758,19 @@ def _safe_eval_bool(py_expr: str, bindings: Dict[str, Any]) -> bool:
if isinstance(node.op, ast.Not):
return (not val)
if isinstance(node, ast.BoolOp) and isinstance(node.op, tuple(allowed_boolops)):
vals = [bool(eval_node(v)) for v in node.values]
# Short-circuit evaluation:
# AND: stop at the first False and return False; otherwise True
# OR: stop at the first True and return True; otherwise False
if isinstance(node.op, ast.And):
res = True
for v in vals:
res = res and v
return res
for v in node.values:
if not bool(eval_node(v)):
return False
return True
if isinstance(node.op, ast.Or):
res = False
for v in vals:
res = res or v
return res
for v in node.values:
if bool(eval_node(v)):
return True
return False
if isinstance(node, ast.Compare):
left = eval_node(node.left)
for opnode, comparator in zip(node.ops, node.comparators):

View File

@@ -0,0 +1,34 @@
from __future__ import annotations
"""
Provider adapter package for ProviderCall.
Exports:
- ProviderAdapter base class
- Implementations: OpenAIAdapter, GeminiAdapter, GeminiImageAdapter, ClaudeAdapter
- Utilities: default_base_url_for, insert_items, split_pos_spec
"""
from .base import ( # [ProviderAdapter](agentui/providers/adapters/base.py:10)
ProviderAdapter,
default_base_url_for,
insert_items,
split_pos_spec,
)
from .openai import OpenAIAdapter # [OpenAIAdapter](agentui/providers/adapters/openai.py:39)
from .gemini import ( # [GeminiAdapter](agentui/providers/adapters/gemini.py:56)
GeminiAdapter,
GeminiImageAdapter, # [GeminiImageAdapter](agentui/providers/adapters/gemini.py:332)
)
from .claude import ClaudeAdapter # [ClaudeAdapter](agentui/providers/adapters/claude.py:56)
__all__ = [
"ProviderAdapter",
"OpenAIAdapter",
"GeminiAdapter",
"GeminiImageAdapter",
"ClaudeAdapter",
"default_base_url_for",
"insert_items",
"split_pos_spec",
]

View File

@@ -0,0 +1,148 @@
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional, Tuple
class ProviderAdapter(ABC): # [ProviderAdapter.__init__()](agentui/providers/adapters/base.py:10)
"""
Базовый интерфейс адаптера провайдера для ProviderCall.
Задачи адаптера:
- blocks_struct_for_template: собрать pm_struct из унифицированных сообщений (Prompt Blocks)
- normalize_segment/filter_items: привести произвольный сегмент к целевой провайдерной структуре и отфильтровать пустое
- extract_system_text_from_obj: вытащить системный текст из произвольного сегмента (если он там есть)
- combine_segments: слить pre_segments (prompt_preprocess) и prompt_combine с blocks_struct → итоговый pm_struct
- prompt_fragment: собрать строку JSON-фрагмента для подстановки в [[PROMPT]]
- default_endpoint/default_base_url: дефолты путей и базовых URL
"""
name: str = "base"
    # --- HTTP defaults ---
@abstractmethod
def default_base_url(self) -> str:
...
@abstractmethod
def default_endpoint(self, model: str) -> str:
...
    # --- PROMPT: building provider structures ---
@abstractmethod
def blocks_struct_for_template(
self,
unified_messages: List[Dict[str, Any]],
context: Dict[str, Any],
node_config: Dict[str, Any],
) -> Dict[str, Any]:
"""
Из унифицированных сообщений [{role, content}] (включая text+image) собрать pm_struct
для целевого провайдера. Результат должен быть совместим с текущей логикой [[PROMPT]].
"""
...
@abstractmethod
def normalize_segment(self, obj: Any) -> List[Dict[str, Any]]:
"""
Привести произвольный сегмент (dict/list/str/числа) к целевому массиву элементов
(например, messages для openai/claude или contents для gemini).
"""
...
@abstractmethod
def filter_items(self, items: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""
Отфильтровать пустые элементы (пустые тексты и т.п.) согласно правилам провайдера.
"""
...
@abstractmethod
def extract_system_text_from_obj(self, obj: Any, render_ctx: Dict[str, Any]) -> Optional[str]:
"""
Вытащить системный текст из произвольного объекта фрагмента:
- OpenAI: messages[*] role=system
- Gemini: systemInstruction.parts[].text
- Claude: top-level system (string/blocks)
Возвращает строку или None.
"""
...
@abstractmethod
def combine_segments(
self,
blocks_struct: Dict[str, Any],
pre_segments_raw: List[Dict[str, Any]],
raw_segs: List[str],
render_ctx: Dict[str, Any],
pre_var_paths: set[str],
render_template_simple_fn, # (s, ctx, out_map) -> str
var_macro_fullmatch_re, # _VAR_MACRO_RE.fullmatch
detect_vendor_fn, # detect_vendor
) -> Dict[str, Any]:
"""
Слить blocks_struct c массивами pre_segments_raw и строковыми raw_segs (prompt_combine)
и вернуть итоговый pm_struct. Поведение должно повторять текущее (позиционирование, фильтр пустых,
сбор системного текста).
"""
...
@abstractmethod
def prompt_fragment(self, pm_struct: Dict[str, Any], node_config: Dict[str, Any]) -> str:
"""
Сформировать строку JSON-фрагмента для [[PROMPT]] по итоговому pm_struct.
"""
...
# --- Shared utilities for positioning and directive parsing ---------------------
def insert_items(base: List[Any], items: List[Any], pos_spec: Optional[str]) -> List[Any]: # [insert_items()](agentui/providers/adapters/base.py:114)
if not items:
return base
if not pos_spec or str(pos_spec).lower() == "append":
base.extend(items)
return base
p = str(pos_spec).lower()
if p == "prepend":
return list(items) + base
try:
idx = int(pos_spec) # type: ignore[arg-type]
if idx < 0:
idx = len(base) + idx
if idx < 0:
idx = 0
if idx > len(base):
idx = len(base)
return base[:idx] + list(items) + base[idx:]
except Exception:
base.extend(items)
return base
def split_pos_spec(s: str) -> Tuple[str, Optional[str]]: # [split_pos_spec()](agentui/providers/adapters/base.py:135)
"""
Отделить директиву @pos=... от тела сегмента.
Возвращает (body, pos_spec | None).
"""
import re as _re
m = _re.search(r"@pos\s*=\s*(prepend|append|-?\d+)\s*$", str(s or ""), flags=_re.IGNORECASE)
if not m:
return (str(s or "").strip(), None)
body = str(s[: m.start()]).strip()
return (body, str(m.group(1)).strip().lower())
# --- Default base_url per "vendor" (used by RawForward) ------------------
def default_base_url_for(vendor: str) -> Optional[str]: # [default_base_url_for()](agentui/providers/adapters/base.py:149)
v = (vendor or "").strip().lower()
if v == "openai":
return "https://api.openai.com"
if v == "claude" or v == "anthropic":
return "https://api.anthropic.com"
if v == "gemini" or v == "gemini_image":
return "https://generativelanguage.googleapis.com"
return None
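
Hedged examples of the two helpers above; the payloads are illustrative, and the expected results follow from the code as written:

from agentui.providers.adapters.base import insert_items, split_pos_spec

base = [{"role": "user"}, {"role": "assistant"}]
items = [{"role": "system"}]

assert insert_items(list(base), items, "prepend")[0]["role"] == "system"
assert insert_items(list(base), items, None)[-1]["role"] == "system"   # append is the default
assert insert_items(list(base), items, "-1")[1]["role"] == "system"    # negative index counts from the end

assert split_pos_spec("[[PROMPT]] @pos=prepend") == ("[[PROMPT]]", "prepend")
assert split_pos_spec("plain segment") == ("plain segment", None)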

View File

@@ -0,0 +1,475 @@
from __future__ import annotations
import json
from typing import Any, Dict, List, Optional
from agentui.providers.adapters.base import ( # [ProviderAdapter](agentui/providers/adapters/base.py:10)
ProviderAdapter,
insert_items,
split_pos_spec,
)
def _is_data_url(u: str) -> bool: # [_is_data_url()](agentui/providers/adapters/claude.py:14)
return isinstance(u, str) and u.strip().lower().startswith("data:")
def _split_data_url(u: str) -> tuple[str, str]: # [_split_data_url()](agentui/providers/adapters/claude.py:18)
"""
Возвращает (mime, b64) для data URL.
Поддерживаем форму: data:<mime>;base64,<b64>
"""
try:
header, b64 = u.split(",", 1)
mime = "application/octet-stream"
if header.startswith("data:"):
header2 = header[5:]
if ";base64" in header2:
mime = header2.split(";base64", 1)[0] or mime
elif ";" in header2:
mime = header2.split(";", 1)[0] or mime
elif header2:
mime = header2
return mime, b64
except Exception:
return "application/octet-stream", ""
def _try_json(s: str) -> Any: # [_try_json()](agentui/providers/adapters/claude.py:38)
try:
obj = json.loads(s)
except Exception:
try:
obj = json.loads(s, strict=False) # type: ignore[call-arg]
except Exception:
return None
for _ in range(2):
if isinstance(obj, str):
st = obj.strip()
if (st.startswith("{") and st.endswith("}")) or (st.startswith("[") and st.endswith("]")):
try:
obj = json.loads(st)
continue
except Exception:
break
break
return obj
class ClaudeAdapter(ProviderAdapter): # [ClaudeAdapter.__init__()](agentui/providers/adapters/claude.py:56)
name = "claude"
    # --- HTTP defaults ---
def default_base_url(self) -> str:
return "https://api.anthropic.com"
def default_endpoint(self, model: str) -> str:
return "/v1/messages"
    # --- PROMPT: building provider structures ---
def blocks_struct_for_template(
self,
unified_messages: List[Dict[str, Any]],
context: Dict[str, Any],
node_config: Dict[str, Any],
) -> Dict[str, Any]:
"""
Совместимо с веткой provider=='claude' из
[ProviderCallNode._blocks_struct_for_template()](agentui/pipeline/executor.py:2022).
"""
# Системные сообщения как текст
sys_msgs = []
for m in (unified_messages or []):
if m.get("role") == "system":
c = m.get("content")
if isinstance(c, list):
sys_msgs.append("\n".join([str(p.get("text") or "") for p in c if isinstance(p, dict) and p.get("type") == "text"]))
else:
sys_msgs.append(str(c or ""))
sys_text = "\n\n".join([s for s in sys_msgs if s]).strip()
out_msgs = []
for m in (unified_messages or []):
if m.get("role") == "system":
continue
role = m.get("role")
role = role if role in {"user", "assistant"} else "user"
c = m.get("content")
blocks: List[Dict[str, Any]] = []
if isinstance(c, list):
for p in c:
if not isinstance(p, dict):
continue
if p.get("type") == "text":
blocks.append({"type": "text", "text": str(p.get("text") or "")})
elif p.get("type") in {"image_url", "image"}:
url = str(p.get("url") or "")
if _is_data_url(url):
mime, b64 = _split_data_url(url)
blocks.append({"type": "image", "source": {"type": "base64", "media_type": mime, "data": b64}})
else:
blocks.append({"type": "image", "source": {"type": "url", "url": url}})
else:
blocks.append({"type": "text", "text": str(c or "")})
out_msgs.append({"role": role, "content": blocks})
claude_no_system = False
try:
claude_no_system = bool((node_config or {}).get("claude_no_system", False))
except Exception:
claude_no_system = False
if claude_no_system:
if sys_text:
out_msgs = [{"role": "user", "content": [{"type": "text", "text": sys_text}]}] + out_msgs
return {
"messages": out_msgs,
"system_text": sys_text,
}
d = {
"system_text": sys_text,
"messages": out_msgs,
}
if sys_text:
# Prefer system as a plain string (proxy compatibility)
d["system"] = sys_text
return d
def normalize_segment(self, x: Any) -> List[Dict[str, Any]]:
"""
Совместимо с [_as_claude_messages()](agentui/pipeline/executor.py:2602).
"""
msgs: List[Dict[str, Any]] = []
try:
if isinstance(x, dict):
# Dict with messages (OpenAI-like)
if isinstance(x.get("messages"), list):
x = x.get("messages") or []
# fallthrough to list mapping below
elif isinstance(x.get("contents"), list):
# Gemini -> Claude
for c in (x.get("contents") or []):
if not isinstance(c, dict):
continue
role_raw = str(c.get("role") or "user")
role = "assistant" if role_raw == "model" else ("user" if role_raw not in {"user", "assistant"} else role_raw)
parts = c.get("parts") or []
text = "\n".join([str(p.get("text")) for p in parts if isinstance(p, dict) and isinstance(p.get("text"), str)]).strip()
msgs.append({"role": role, "content": [{"type": "text", "text": text}]})
return msgs
if isinstance(x, list):
# Gemini contents list -> Claude messages
if all(isinstance(c, dict) and "parts" in c for c in x):
for c in x:
role_raw = str(c.get("role") or "user")
role = "assistant" if role_raw == "model" else ("user" if role_raw not in {"user", "assistant"} else role_raw)
blocks: List[Dict[str, Any]] = []
for p in (c.get("parts") or []):
if isinstance(p, dict) and isinstance(p.get("text"), str):
txt = p.get("text").strip()
if txt:
blocks.append({"type": "text", "text": txt})
msgs.append({"role": role, "content": blocks or [{"type": "text", "text": ""}]})
return msgs
# OpenAI messages list -> Claude
if all(isinstance(m, dict) and "content" in m for m in x):
out: List[Dict[str, Any]] = []
for m in x:
role = m.get("role", "user")
cont = m.get("content")
blocks: List[Dict[str, Any]] = []
if isinstance(cont, str):
blocks.append({"type": "text", "text": cont})
elif isinstance(cont, list):
for p in cont:
if not isinstance(p, dict):
continue
if p.get("type") == "text":
blocks.append({"type": "text", "text": str(p.get("text") or "")})
elif p.get("type") in {"image_url", "image"}:
url = ""
if isinstance(p.get("image_url"), dict):
url = str((p.get("image_url") or {}).get("url") or "")
elif "url" in p:
url = str(p.get("url") or "")
if url:
blocks.append({"type": "image", "source": {"type": "url", "url": url}})
else:
blocks.append({"type": "text", "text": json.dumps(cont, ensure_ascii=False)})
out.append({"role": role if role in {"user", "assistant"} else "user", "content": blocks})
return out
# Fallback
return [{"role": "user", "content": [{"type": "text", "text": json.dumps(x, ensure_ascii=False)}]}]
if isinstance(x, str):
try_obj = _try_json(x)
if try_obj is not None:
return self.normalize_segment(try_obj)
return [{"role": "user", "content": [{"type": "text", "text": x}]}]
return [{"role": "user", "content": [{"type": "text", "text": json.dumps(x, ensure_ascii=False)}]}]
except Exception:
return [{"role": "user", "content": [{"type": "text", "text": str(x)}]}]
def filter_items(self, arr: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""
Совместимо с [_filter_claude()](agentui/pipeline/executor.py:2820).
"""
out: List[Dict[str, Any]] = []
for m in (arr or []):
if not isinstance(m, dict):
continue
blocks = m.get("content")
if isinstance(blocks, list):
norm = []
for b in blocks:
if isinstance(b, dict) and b.get("type") == "text":
txt = str(b.get("text") or "")
if txt.strip():
norm.append({"type": "text", "text": txt})
if norm:
out.append({"role": m.get("role", "user"), "content": norm})
return out
def extract_system_text_from_obj(self, x: Any, render_ctx: Dict[str, Any]) -> Optional[str]:
"""
Поведение совместимо с [_extract_sys_text_from_obj()](agentui/pipeline/executor.py:2676).
"""
try:
# Dict objects
if isinstance(x, dict):
# Gemini systemInstruction
if "systemInstruction" in x:
si = x.get("systemInstruction")
def _parts_to_text(siobj: Any) -> str:
try:
parts = siobj.get("parts") or []
texts = [
str(p.get("text") or "")
for p in parts
if isinstance(p, dict) and isinstance(p.get("text"), str) and p.get("text").strip()
]
return "\n".join([t for t in texts if t]).strip()
except Exception:
return ""
if isinstance(si, dict):
t = _parts_to_text(si)
if t:
return t
if isinstance(si, list):
texts = []
for p in si:
if isinstance(p, dict) and isinstance(p.get("text"), str) and p.get("text").strip():
texts.append(p.get("text").strip())
t = "\n".join(texts).strip()
if t:
return t
if isinstance(si, str) and si.strip():
return si.strip()
# Claude system (string or blocks)
if "system" in x and not ("messages" in x and isinstance(x.get("messages"), list)):
sysv = x.get("system")
if isinstance(sysv, str) and sysv.strip():
return sysv.strip()
if isinstance(sysv, list):
texts = [
str(b.get("text") or "")
for b in sysv
if isinstance(b, dict)
and (b.get("type") == "text")
and isinstance(b.get("text"), str)
and b.get("text").strip()
]
t = "\n".join([t for t in texts if t]).strip()
if t:
return t
# OpenAI messages with role=system
if isinstance(x.get("messages"), list):
sys_msgs = []
for m in (x.get("messages") or []):
try:
if (str(m.get("role") or "").lower().strip() == "system"):
cont = m.get("content")
if isinstance(cont, str) and cont.strip():
sys_msgs.append(cont.strip())
elif isinstance(cont, list):
for p in cont:
if isinstance(p, dict) and p.get("type") == "text" and isinstance(p.get("text"), str) and p.get("text").strip():
sys_msgs.append(p.get("text").strip())
except Exception:
continue
if sys_msgs:
return "\n\n".join(sys_msgs).strip()
# List objects
if isinstance(x, list):
# OpenAI messages list with role=system
if all(isinstance(m, dict) and "role" in m for m in x):
sys_msgs = []
for m in x:
try:
if (str(m.get("role") or "").lower().strip() == "system"):
cont = m.get("content")
if isinstance(cont, str) and cont.strip():
sys_msgs.append(cont.strip())
elif isinstance(cont, list):
for p in cont:
if isinstance(p, dict) and p.get("type") == "text" and isinstance(p.get("text"), str) and p.get("text").strip():
sys_msgs.append(p.get("text").strip())
except Exception:
continue
if sys_msgs:
return "\n\n".join(sys_msgs).strip()
            # Gemini 'contents' list: try to read systemInstruction from the incoming snapshot
if all(isinstance(c, dict) and "parts" in c for c in x):
try:
inc = (render_ctx.get("incoming") or {}).get("json") or {}
si = inc.get("systemInstruction")
if si is not None:
return self.extract_system_text_from_obj({"systemInstruction": si}, render_ctx)
except Exception:
pass
return None
except Exception:
return None
def combine_segments(
self,
blocks_struct: Dict[str, Any],
pre_segments_raw: List[Dict[str, Any]],
raw_segs: List[str],
render_ctx: Dict[str, Any],
pre_var_paths: set[str],
render_template_simple_fn,
var_macro_fullmatch_re,
detect_vendor_fn,
) -> Dict[str, Any]:
"""
Повторяет ветку provider=='claude' из prompt_combine
([ProviderCallNode.run()](agentui/pipeline/executor.py:2998)).
"""
built3: List[Dict[str, Any]] = []
sys_texts: List[str] = []
        # Node config (for claude_no_system) is passed via render_ctx['_node_config']; see the integration
node_cfg = {}
try:
nc = render_ctx.get("_node_config")
if isinstance(nc, dict):
node_cfg = nc
except Exception:
node_cfg = {}
claude_no_system = False
try:
claude_no_system = bool(node_cfg.get("claude_no_system", False))
except Exception:
claude_no_system = False
        # Pre-segments
for _pre in (pre_segments_raw or []):
try:
_obj = _pre.get("obj")
items = self.normalize_segment(_obj)
items = self.filter_items(items)
built3 = insert_items(built3, items, _pre.get("pos"))
try:
sx = self.extract_system_text_from_obj(_obj, render_ctx)
if isinstance(sx, str) and sx.strip():
sys_texts.append(sx.strip())
except Exception:
pass
except Exception:
pass
        # Main segments
for raw_seg in (raw_segs or []):
body_seg, pos_spec = split_pos_spec(raw_seg)
if body_seg == "[[PROMPT]]":
items = self.filter_items(list(blocks_struct.get("messages", []) or []))
built3 = insert_items(built3, items, pos_spec)
continue
m_pre = var_macro_fullmatch_re.fullmatch(body_seg)
if m_pre:
_p = (m_pre.group(1) or "").strip()
try:
if _p in pre_var_paths:
# Skip duplicate var segment - already inserted via prompt_preprocess (filtered)
continue
except Exception:
pass
resolved = render_template_simple_fn(body_seg, render_ctx, render_ctx.get("OUT") or {})
obj = _try_json(resolved)
try:
pg = detect_vendor_fn(obj if isinstance(obj, dict) else {})
print(f"DEBUG: prompt_combine seg provider_guess={pg} -> target=claude pos={pos_spec}")
except Exception:
pass
items = self.normalize_segment(obj if obj is not None else resolved)
items = self.filter_items(items)
built3 = insert_items(built3, items, pos_spec)
try:
sx = self.extract_system_text_from_obj(obj, render_ctx) if obj is not None else None
if isinstance(sx, str) and sx.strip():
sys_texts.append(sx.strip())
except Exception:
pass
if not built3:
built3 = self.filter_items(list(blocks_struct.get("messages", []) or []))
# Merge system blocks from PROMPT blocks + gathered sys_texts
existing_sys = blocks_struct.get("system") or []
sys_blocks: List[Dict[str, Any]] = []
if isinstance(existing_sys, list):
sys_blocks.extend(existing_sys)
st0 = blocks_struct.get("system_text") or ""
# Ensure PROMPT system_text from blocks is included as a Claude system block
if isinstance(st0, str) and st0.strip():
sys_blocks.append({"type": "text", "text": st0})
for s in sys_texts:
sys_blocks.append({"type": "text", "text": s})
st = "\n\n".join([t for t in [st0] + sys_texts if isinstance(t, str) and t.strip()])
if claude_no_system:
# Prepend system text as a user message instead of top-level system
if st:
built3 = [{"role": "user", "content": [{"type": "text", "text": st}]}] + built3
return {"messages": built3, "system_text": st}
pm_struct = {"messages": built3, "system_text": st}
# Prefer array of system blocks when possible; fallback to single text block
if sys_blocks:
pm_struct["system"] = sys_blocks
elif st:
pm_struct["system"] = [{"type": "text", "text": st}]
return pm_struct
def prompt_fragment(self, pm_struct: Dict[str, Any], node_config: Dict[str, Any]) -> str:
"""
Совместимо с веткой provider=='claude' в построении [[PROMPT]]
([ProviderCallNode.run()](agentui/pipeline/executor.py:3125)).
"""
parts: List[str] = []
        # Honor the compatibility flag: with claude_no_system, do not emit a top-level "system"
claude_no_system = False
try:
claude_no_system = bool((node_config or {}).get("claude_no_system", False))
except Exception:
claude_no_system = False
if not claude_no_system:
            # Prefer the array of system blocks when present; otherwise the system_text string
sys_val = pm_struct.get("system", None)
if sys_val is None:
sys_val = pm_struct.get("system_text")
if sys_val:
parts.append('"system": ' + json.dumps(sys_val, ensure_ascii=False))
msgs = pm_struct.get("messages")
if msgs is not None:
parts.append('"messages": ' + json.dumps(msgs, ensure_ascii=False))
return ", ".join(parts)

View File

@@ -0,0 +1,419 @@
from __future__ import annotations
import json
from typing import Any, Dict, List, Optional, Tuple
from agentui.providers.adapters.base import ( # [ProviderAdapter](agentui/providers/adapters/base.py:10)
ProviderAdapter,
insert_items,
split_pos_spec,
)
def _is_data_url(u: str) -> bool: # [_is_data_url()](agentui/providers/adapters/gemini.py:14)
return isinstance(u, str) and u.strip().lower().startswith("data:")
def _split_data_url(u: str) -> tuple[str, str]: # [_split_data_url()](agentui/providers/adapters/gemini.py:18)
"""
Возвращает (mime, b64) для data URL.
Поддерживаем форму: data:<mime>;base64,<b64>
"""
try:
header, b64 = u.split(",", 1)
mime = "application/octet-stream"
if header.startswith("data:"):
header2 = header[5:]
if ";base64" in header2:
mime = header2.split(";base64", 1)[0] or mime
elif ";" in header2:
mime = header2.split(";", 1)[0] or mime
elif header2:
mime = header2
return mime, b64
except Exception:
return "application/octet-stream", ""
def _try_json(s: str) -> Any: # [_try_json()](agentui/providers/adapters/gemini.py:38)
try:
obj = json.loads(s)
except Exception:
try:
obj = json.loads(s, strict=False) # type: ignore[call-arg]
except Exception:
return None
for _ in range(2):
if isinstance(obj, str):
st = obj.strip()
if (st.startswith("{") and st.endswith("}")) or (st.startswith("[") and st.endswith("]")):
try:
obj = json.loads(st)
continue
except Exception:
break
break
return obj
class GeminiAdapter(ProviderAdapter): # [GeminiAdapter.__init__()](agentui/providers/adapters/gemini.py:56)
name = "gemini"
    # --- HTTP defaults ---
def default_base_url(self) -> str:
return "https://generativelanguage.googleapis.com"
def default_endpoint(self, model: str) -> str:
        # endpoint with a model template (as in the original code)
return "/v1beta/models/{{ model }}:generateContent"
    # --- PROMPT: building provider structures ---
def blocks_struct_for_template(
self,
unified_messages: List[Dict[str, Any]],
context: Dict[str, Any],
node_config: Dict[str, Any],
) -> Dict[str, Any]:
"""
Совместимо с веткой provider in {'gemini','gemini_image'} из
[ProviderCallNode._blocks_struct_for_template()](agentui/pipeline/executor.py:1981).
"""
def _text_from_msg(m: Dict[str, Any]) -> str:
c = m.get("content")
if isinstance(c, list):
texts = [str(p.get("text") or "") for p in c if isinstance(p, dict) and p.get("type") == "text"]
return "\n".join([t for t in texts if t])
return str(c or "")
sys_text = "\n\n".join([_text_from_msg(m) for m in (unified_messages or []) if m.get("role") == "system"]).strip()
contents: List[Dict[str, Any]] = []
for m in (unified_messages or []):
if m.get("role") == "system":
continue
role = "model" if m.get("role") == "assistant" else "user"
c = m.get("content")
parts: List[Dict[str, Any]] = []
if isinstance(c, list):
for p in c:
if not isinstance(p, dict):
continue
if p.get("type") == "text":
parts.append({"text": str(p.get("text") or "")})
elif p.get("type") in {"image_url", "image"}:
url = str(p.get("url") or "")
if _is_data_url(url):
mime, b64 = _split_data_url(url)
parts.append({"inline_data": {"mime_type": mime, "data": b64}})
else:
parts.append({"text": url})
else:
parts.append({"text": str(c or "")})
contents.append({"role": role, "parts": parts})
d: Dict[str, Any] = {
"contents": contents,
"system_text": sys_text,
}
if sys_text:
d["systemInstruction"] = {"parts": [{"text": sys_text}]}
return d
def normalize_segment(self, x: Any) -> List[Dict[str, Any]]:
"""
Совместимо с [_as_gemini_contents()](agentui/pipeline/executor.py:2521).
"""
cnts: List[Dict[str, Any]] = []
try:
if isinstance(x, dict):
if isinstance(x.get("contents"), list):
return list(x.get("contents") or [])
if isinstance(x.get("messages"), list):
# OpenAI → Gemini
for m in (x.get("messages") or []):
if not isinstance(m, dict):
continue
role_raw = str(m.get("role") or "user")
role = "model" if role_raw == "assistant" else "user"
cont = m.get("content")
parts: List[Dict[str, Any]] = []
if isinstance(cont, str):
parts = [{"text": cont}]
elif isinstance(cont, list):
for p in cont:
if not isinstance(p, dict):
continue
if p.get("type") == "text":
parts.append({"text": str(p.get("text") or "")})
elif p.get("type") in {"image_url", "image"}:
                                    # Gemini does not accept external image URLs as image parts; keep the URL as text
url = ""
if isinstance(p.get("image_url"), dict):
url = str((p.get("image_url") or {}).get("url") or "")
elif "url" in p:
url = str(p.get("url") or "")
if url:
parts.append({"text": url})
else:
parts = [{"text": json.dumps(cont, ensure_ascii=False)}]
cnts.append({"role": role, "parts": parts})
return cnts
if isinstance(x, list):
# Gemini contents list already
if all(isinstance(c, dict) and "parts" in c for c in x):
return list(x)
# OpenAI messages list -> Gemini
if all(isinstance(m, dict) and "content" in m for m in x):
out: List[Dict[str, Any]] = []
for m in x:
role_raw = str(m.get("role") or "user")
role = "model" if role_raw == "assistant" else "user"
cont = m.get("content")
parts: List[Dict[str, Any]] = []
if isinstance(cont, str):
parts = [{"text": cont}]
elif isinstance(cont, list):
for p in cont:
if not isinstance(p, dict):
continue
if p.get("type") == "text":
parts.append({"text": str(p.get("text") or "")})
elif p.get("type") in {"image_url", "image"}:
url = ""
if isinstance(p.get("image_url"), dict):
url = str((p.get("image_url") or {}).get("url") or "")
elif "url" in p:
url = str(p.get("url") or "")
if url:
parts.append({"text": url})
else:
parts = [{"text": json.dumps(cont, ensure_ascii=False)}]
out.append({"role": role, "parts": parts})
return out
# Fallback
return [{"role": "user", "parts": [{"text": json.dumps(x, ensure_ascii=False)}]}]
if isinstance(x, str):
try_obj = _try_json(x)
if try_obj is not None:
return self.normalize_segment(try_obj)
return [{"role": "user", "parts": [{"text": x}]}]
return [{"role": "user", "parts": [{"text": json.dumps(x, ensure_ascii=False)}]}]
except Exception:
return [{"role": "user", "parts": [{"text": str(x)}]}]
def filter_items(self, arr: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""
Совместимо с [_filter_gemini()](agentui/pipeline/executor.py:2782).
Сохраняем inline_data/inlineData как есть; текстовые части — только непустые.
"""
out: List[Dict[str, Any]] = []
for it in (arr or []):
if not isinstance(it, dict):
continue
parts = it.get("parts") or []
norm_parts = []
for p in parts:
if isinstance(p, dict):
t = p.get("text")
if isinstance(t, str) and t.strip():
norm_parts.append({"text": t})
elif "inline_data" in p or "inlineData" in p:
                        norm_parts.append(p)  # pass images through as-is
if norm_parts:
out.append({"role": it.get("role", "user"), "parts": norm_parts})
return out
def extract_system_text_from_obj(self, x: Any, render_ctx: Dict[str, Any]) -> Optional[str]:
"""
Совместимо с [_extract_sys_text_from_obj()](agentui/pipeline/executor.py:2676) для Gemini.
"""
try:
# Dict
if isinstance(x, dict):
if "systemInstruction" in x:
si = x.get("systemInstruction")
def _parts_to_text(siobj: Any) -> str:
try:
parts = siobj.get("parts") or []
texts = [
str(p.get("text") or "")
for p in parts
if isinstance(p, dict) and isinstance(p.get("text"), str) and p.get("text").strip()
]
return "\n".join([t for t in texts if t]).strip()
except Exception:
return ""
if isinstance(si, dict):
t = _parts_to_text(si)
if t:
return t
if isinstance(si, list):
texts = []
for p in si:
if isinstance(p, dict) and isinstance(p.get("text"), str) and p.get("text").strip():
texts.append(p.get("text").strip())
t = "\n".join(texts).strip()
if t:
return t
if isinstance(si, str) and si.strip():
return si.strip()
            # OpenAI system inside messages
if isinstance(x.get("messages"), list):
sys_msgs = []
for m in (x.get("messages") or []):
try:
if (str(m.get("role") or "").lower().strip() == "system"):
cont = m.get("content")
if isinstance(cont, str) and cont.strip():
sys_msgs.append(cont.strip())
elif isinstance(cont, list):
for p in cont:
if (
isinstance(p, dict)
and p.get("type") == "text"
and isinstance(p.get("text"), str)
and p.get("text").strip()
):
sys_msgs.append(p.get("text").strip())
except Exception:
continue
if sys_msgs:
return "\n\n".join(sys_msgs).strip()
# List
if isinstance(x, list):
if all(isinstance(m, dict) and "role" in m for m in x):
sys_msgs = []
for m in x:
try:
if (str(m.get("role") or "").lower().strip() == "system"):
cont = m.get("content")
if isinstance(cont, str) and cont.strip():
sys_msgs.append(cont.strip())
elif isinstance(cont, list):
for p in cont:
if (
isinstance(p, dict)
and p.get("type") == "text"
and isinstance(p.get("text"), str)
and p.get("text").strip()
):
sys_msgs.append(p.get("text").strip())
except Exception:
continue
if sys_msgs:
return "\n\n".join(sys_msgs).strip()
            # Gemini contents list -> try to take it from the incoming snapshot
if all(isinstance(c, dict) and "parts" in c for c in x):
try:
inc = (render_ctx.get("incoming") or {}).get("json") or {}
si = inc.get("systemInstruction")
if si is not None:
return self.extract_system_text_from_obj({"systemInstruction": si}, render_ctx)
except Exception:
pass
return None
except Exception:
return None
def combine_segments(
self,
blocks_struct: Dict[str, Any],
pre_segments_raw: List[Dict[str, Any]],
raw_segs: List[str],
render_ctx: Dict[str, Any],
pre_var_paths: set[str],
render_template_simple_fn,
var_macro_fullmatch_re,
detect_vendor_fn,
) -> Dict[str, Any]:
"""
Повторяет ветку provider in {'gemini','gemini_image'} из prompt_combine
([ProviderCallNode.run()](agentui/pipeline/executor.py:2874)).
"""
built: List[Dict[str, Any]] = []
sys_texts: List[str] = []
        # 1) Pre-segments
for _pre in (pre_segments_raw or []):
try:
_obj = _pre.get("obj")
items = self.normalize_segment(_obj)
items = self.filter_items(items)
built = insert_items(built, items, _pre.get("pos"))
try:
sx = self.extract_system_text_from_obj(_obj, render_ctx)
if isinstance(sx, str) and sx.strip():
sys_texts.append(sx.strip())
except Exception:
pass
except Exception:
pass
        # 2) Main segments
for raw_seg in (raw_segs or []):
body_seg, pos_spec = split_pos_spec(raw_seg)
if body_seg == "[[PROMPT]]":
items = self.filter_items(list(blocks_struct.get("contents", []) or []))
built = insert_items(built, items, pos_spec)
continue
m_pre = var_macro_fullmatch_re.fullmatch(body_seg)
if m_pre:
_p = (m_pre.group(1) or "").strip()
try:
if _p in pre_var_paths:
continue
except Exception:
pass
resolved = render_template_simple_fn(body_seg, render_ctx, render_ctx.get("OUT") or {})
obj = _try_json(resolved)
# debug provider guess
try:
pg = detect_vendor_fn(obj if isinstance(obj, dict) else {})
print(f"DEBUG: prompt_combine seg provider_guess={pg} -> target=gemini pos={pos_spec}")
except Exception:
pass
items = self.normalize_segment(obj if obj is not None else resolved)
items = self.filter_items(items)
built = insert_items(built, items, pos_spec)
try:
sx = self.extract_system_text_from_obj(obj, render_ctx) if obj is not None else None
if isinstance(sx, str) and sx.strip():
sys_texts.append(sx.strip())
except Exception:
pass
if not built:
built = self.filter_items(list(blocks_struct.get("contents", []) or []))
# Merge systemInstruction: PROMPT blocks + gathered sys_texts
existing_si = blocks_struct.get("systemInstruction")
parts = []
if isinstance(existing_si, dict) and isinstance(existing_si.get("parts"), list):
parts = list(existing_si.get("parts") or [])
for s in sys_texts:
parts.append({"text": s})
new_si = {"parts": parts} if parts else existing_si
return {"contents": built, "systemInstruction": new_si, "system_text": blocks_struct.get("system_text")}
def prompt_fragment(self, pm_struct: Dict[str, Any], node_config: Dict[str, Any]) -> str:
"""
Совместимо с веткой provider in {'gemini','gemini_image'} в построении [[PROMPT]]
([ProviderCallNode.run()](agentui/pipeline/executor.py:3103)).
"""
parts = []
contents = pm_struct.get("contents")
if contents is not None:
parts.append('"contents": ' + json.dumps(contents, ensure_ascii=False))
sysi = pm_struct.get("systemInstruction")
if sysi is not None:
parts.append('"systemInstruction": ' + json.dumps(sysi, ensure_ascii=False))
return ", ".join(parts)
class GeminiImageAdapter(GeminiAdapter): # [GeminiImageAdapter.__init__()](agentui/providers/adapters/gemini.py:332)
name = "gemini_image"
    # All of the logic matches Gemini (generateContent), including the defaults.
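
A hedged sketch of the data-URL handling in blocks_struct_for_template above (payload is illustrative):

from agentui.providers.adapters.gemini import GeminiAdapter

adapter = GeminiAdapter()
struct = adapter.blocks_struct_for_template(
    [{"role": "user", "content": [
        {"type": "text", "text": "describe this"},
        {"type": "image", "url": "data:image/png;base64,AAAA"},
    ]}],
    context={},
    node_config={},
)
# The data URL is split into mime type and payload:
assert struct["contents"][0]["parts"][1] == {"inline_data": {"mime_type": "image/png", "data": "AAAA"}}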

View File

@@ -0,0 +1,398 @@
from __future__ import annotations
import json
import re
from typing import Any, Dict, List, Optional, Tuple
from agentui.providers.adapters.base import ( # [ProviderAdapter](agentui/providers/adapters/base.py:10)
ProviderAdapter,
insert_items,
split_pos_spec,
)
def _try_json(s: str) -> Any: # [_try_json()](agentui/providers/adapters/openai.py:16)
"""
Парсит JSON из строки. Пермиссивный режим и двукратная распаковка строк, как в старой логике.
Возвращает dict/list/примитив или None при неудаче.
"""
try:
obj = json.loads(s)
except Exception:
try:
obj = json.loads(s, strict=False) # type: ignore[call-arg]
except Exception:
return None
    # If the result is a string that itself looks like JSON, try to unwrap it up to 2 times
for _ in range(2):
if isinstance(obj, str):
st = obj.strip()
if (st.startswith("{") and st.endswith("}")) or (st.startswith("[") and st.endswith("]")):
try:
obj = json.loads(st)
continue
except Exception:
break
break
return obj
class OpenAIAdapter(ProviderAdapter): # [OpenAIAdapter.__init__()](agentui/providers/adapters/openai.py:39)
name = "openai"
    # --- HTTP defaults ---
def default_base_url(self) -> str:
return "https://api.openai.com"
def default_endpoint(self, model: str) -> str:
return "/v1/chat/completions"
    # --- PROMPT: building provider structures ---
def blocks_struct_for_template(
self,
unified_messages: List[Dict[str, Any]],
context: Dict[str, Any],
node_config: Dict[str, Any],
) -> Dict[str, Any]:
"""
Совместимо с веткой provider=='openai' из [ProviderCallNode._blocks_struct_for_template()](agentui/pipeline/executor.py:1958).
"""
def _map(m: Dict[str, Any]) -> Dict[str, Any]:
c = m.get("content")
if isinstance(c, list):
parts = []
for p in c:
if isinstance(p, dict) and p.get("type") == "text":
parts.append({"type": "text", "text": str(p.get("text") or "")})
elif isinstance(p, dict) and p.get("type") in {"image_url", "image"}:
url = str(p.get("url") or "")
parts.append({"type": "image_url", "image_url": {"url": url}})
return {"role": m.get("role", "user"), "content": parts}
return {"role": m.get("role", "user"), "content": str(c or "")}
        # system_text: the concatenation of all system blocks (text only, no images)
sys_text = "\n\n".join(
[
str(m.get("content") or "")
if not isinstance(m.get("content"), list)
else "\n".join(
[str(p.get("text") or "") for p in m.get("content") if isinstance(p, dict) and p.get("type") == "text"]
)
for m in (unified_messages or [])
if m.get("role") == "system"
]
).strip()
return {
"messages": [_map(m) for m in (unified_messages or [])],
"system_text": sys_text,
}
def normalize_segment(self, x: Any) -> List[Dict[str, Any]]:
"""
Совместимо с [_as_openai_messages()](agentui/pipeline/executor.py:2451).
- Поддерживает dict with messages (openai)
- Поддерживает dict/list в стиле Gemini.contents (склейка текстов частей)
- Поддерживает list openai-like messages (нормализация parts)
- Строки/прочее упаковываются как один user message
"""
msgs: List[Dict[str, Any]] = []
try:
# Dict inputs
if isinstance(x, dict):
if isinstance(x.get("messages"), list):
return list(x.get("messages") or [])
if isinstance(x.get("contents"), list):
# Gemini -> OpenAI (text-only join)
for c in (x.get("contents") or []):
if not isinstance(c, dict):
continue
role_raw = str(c.get("role") or "user")
role = "assistant" if role_raw == "model" else ("user" if role_raw not in {"user", "assistant"} else role_raw)
parts = c.get("parts") or []
text = "\n".join(
[str(p.get("text")) for p in parts if isinstance(p, dict) and isinstance(p.get("text"), str)]
).strip()
msgs.append({"role": role, "content": text})
return msgs
# List inputs
if isinstance(x, list):
# Gemini contents list -> OpenAI messages
if all(isinstance(c, dict) and "parts" in c for c in x):
for c in x:
role_raw = str(c.get("role") or "user")
role = "assistant" if role_raw == "model" else ("user" if role_raw not in {"user", "assistant"} else role_raw)
parts = c.get("parts") or []
text = "\n".join(
[str(p.get("text")) for p in parts if isinstance(p, dict) and isinstance(p.get("text"), str)]
).strip()
msgs.append({"role": role, "content": text})
return msgs
# OpenAI messages list already — normalize parts if needed
if all(isinstance(m, dict) and "content" in m for m in x):
out: List[Dict[str, Any]] = []
for m in x:
role = m.get("role", "user")
cont = m.get("content")
if isinstance(cont, str):
out.append({"role": role, "content": cont})
elif isinstance(cont, list):
parts2: List[Dict[str, Any]] = []
for p in cont:
if not isinstance(p, dict):
continue
if p.get("type") == "text":
parts2.append({"type": "text", "text": str(p.get("text") or "")})
elif p.get("type") in {"image_url", "image"}:
url = ""
if isinstance(p.get("image_url"), dict):
url = str((p.get("image_url") or {}).get("url") or "")
elif "url" in p:
url = str(p.get("url") or "")
if url:
parts2.append({"type": "image_url", "image_url": {"url": url}})
out.append({"role": role, "content": parts2 if parts2 else ""})
return out
# Fallback: dump JSON as a single user message
return [{"role": "user", "content": json.dumps(x, ensure_ascii=False)}]
# Primitive inputs or embedded JSON string
if isinstance(x, str):
try_obj = _try_json(x)
if try_obj is not None:
return self.normalize_segment(try_obj)
return [{"role": "user", "content": x}]
return [{"role": "user", "content": json.dumps(x, ensure_ascii=False)}]
except Exception:
return [{"role": "user", "content": str(x)}]
def filter_items(self, arr: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""
Совместимо с [_filter_openai()](agentui/pipeline/executor.py:2801).
"""
out: List[Dict[str, Any]] = []
for m in (arr or []):
if not isinstance(m, dict):
continue
c = m.get("content")
if isinstance(c, str) and c.strip():
out.append({"role": m.get("role", "user"), "content": c})
elif isinstance(c, list):
parts = []
for p in c:
if isinstance(p, dict) and p.get("type") == "text":
txt = str(p.get("text") or "")
if txt.strip():
parts.append({"type": "text", "text": txt})
if parts:
out.append({"role": m.get("role", "user"), "content": parts})
return out
def extract_system_text_from_obj(self, x: Any, render_ctx: Dict[str, Any]) -> Optional[str]:
"""
Совместимо с [_extract_sys_text_from_obj()](agentui/pipeline/executor.py:2676).
Умеет читать:
- Gemini: systemInstruction.parts[].text
- Claude: top-level system (string/list of blocks)
- OpenAI: messages[*] with role=system (string content or parts[].text)
- List форматы: openai messages list и gemini contents list (в последнем случае смотрит incoming.json.systemInstruction)
"""
try:
# Dict objects
if isinstance(x, dict):
# Gemini systemInstruction
if "systemInstruction" in x:
si = x.get("systemInstruction")
def _parts_to_text(siobj: Any) -> str:
try:
parts = siobj.get("parts") or []
texts = [
str(p.get("text") or "")
for p in parts
if isinstance(p, dict) and isinstance(p.get("text"), str) and p.get("text").strip()
]
return "\n".join([t for t in texts if t]).strip()
except Exception:
return ""
if isinstance(si, dict):
t = _parts_to_text(si)
if t:
return t
if isinstance(si, list):
texts = []
for p in si:
if isinstance(p, dict) and isinstance(p.get("text"), str) and p.get("text").strip():
texts.append(p.get("text").strip())
t = "\n".join(texts).strip()
if t:
return t
if isinstance(si, str) and si.strip():
return si.strip()
# Claude system (string or blocks)
if "system" in x and not ("messages" in x and isinstance(x.get("messages"), list)):
sysv = x.get("system")
if isinstance(sysv, str) and sysv.strip():
return sysv.strip()
if isinstance(sysv, list):
texts = [
str(b.get("text") or "")
for b in sysv
if isinstance(b, dict) and (b.get("type") == "text") and isinstance(b.get("text"), str) and b.get("text").strip()
]
t = "\n".join([t for t in texts if t]).strip()
if t:
return t
# OpenAI messages with role=system
if isinstance(x.get("messages"), list):
sys_msgs = []
for m in (x.get("messages") or []):
try:
if (str(m.get("role") or "").lower().strip() == "system"):
cont = m.get("content")
if isinstance(cont, str) and cont.strip():
sys_msgs.append(cont.strip())
elif isinstance(cont, list):
for p in cont:
if (
isinstance(p, dict)
and p.get("type") == "text"
and isinstance(p.get("text"), str)
and p.get("text").strip()
):
sys_msgs.append(p.get("text").strip())
except Exception:
continue
if sys_msgs:
return "\n\n".join(sys_msgs).strip()
# List objects
if isinstance(x, list):
# OpenAI messages list with role=system
if all(isinstance(m, dict) and "role" in m for m in x):
sys_msgs = []
for m in x:
try:
if (str(m.get("role") or "").lower().strip() == "system"):
cont = m.get("content")
if isinstance(cont, str) and cont.strip():
sys_msgs.append(cont.strip())
elif isinstance(cont, list):
for p in cont:
if (
isinstance(p, dict)
and p.get("type") == "text"
and isinstance(p.get("text"), str)
and p.get("text").strip()
):
sys_msgs.append(p.get("text").strip())
except Exception:
continue
if sys_msgs:
return "\n\n".join(sys_msgs).strip()
# Gemini 'contents' list: try to read systemInstruction from incoming JSON snapshot
if all(isinstance(c, dict) and "parts" in c for c in x):
try:
inc = (render_ctx.get("incoming") or {}).get("json") or {}
si = inc.get("systemInstruction")
if si is not None:
                        # Reuse this method recursively
return self.extract_system_text_from_obj({"systemInstruction": si}, render_ctx)
except Exception:
pass
return None
except Exception:
return None
def combine_segments(
self,
blocks_struct: Dict[str, Any],
pre_segments_raw: List[Dict[str, Any]],
raw_segs: List[str],
render_ctx: Dict[str, Any],
pre_var_paths: set[str],
render_template_simple_fn,
var_macro_fullmatch_re,
detect_vendor_fn,
) -> Dict[str, Any]:
"""
Повторяет ветку provider=='openai' из prompt_combine в [ProviderCallNode.run()](agentui/pipeline/executor.py:2936).
"""
built: List[Dict[str, Any]] = []
sys_texts: List[str] = []
        # 1) Pre-segments (prompt_preprocess)
for _pre in (pre_segments_raw or []):
try:
_obj = _pre.get("obj")
items = self.normalize_segment(_obj)
items = self.filter_items(items)
built = insert_items(built, items, _pre.get("pos"))
try:
sx = self.extract_system_text_from_obj(_obj, render_ctx)
if isinstance(sx, str) and sx.strip():
sys_texts.append(sx.strip())
except Exception:
pass
except Exception:
pass
        # 2) Main segments (prompt_combine)
for raw_seg in (raw_segs or []):
body_seg, pos_spec = split_pos_spec(raw_seg)
if body_seg == "[[PROMPT]]":
items = self.filter_items(list(blocks_struct.get("messages", []) or []))
built = insert_items(built, items, pos_spec)
continue
            # Hide duplicate plain [[VAR:path]] segments already inserted via this path in pre_var_overrides
m_pre = var_macro_fullmatch_re.fullmatch(body_seg)
if m_pre:
_p = (m_pre.group(1) or "").strip()
try:
if _p in pre_var_paths:
                        # Already inserted via prompt_preprocess (with filtering); skip
continue
except Exception:
pass
resolved = render_template_simple_fn(body_seg, render_ctx, render_ctx.get("OUT") or {})
obj = _try_json(resolved)
# debug provider guess
try:
pg = detect_vendor_fn(obj if isinstance(obj, dict) else {})
print(f"DEBUG: prompt_combine seg provider_guess={pg} -> target=openai pos={pos_spec}")
except Exception:
pass
items = self.normalize_segment(obj if obj is not None else resolved)
items = self.filter_items(items)
built = insert_items(built, items, pos_spec)
try:
sx = self.extract_system_text_from_obj(obj, render_ctx) if obj is not None else None
if isinstance(sx, str) and sx.strip():
sys_texts.append(sx.strip())
except Exception:
pass
        # If nothing was collected, fall back to the original blocks
if not built:
built = self.filter_items(list(blocks_struct.get("messages", []) or []))
        # Prepend system messages gathered in sys_texts
if sys_texts:
sys_msgs = [{"role": "system", "content": s} for s in sys_texts if s]
if sys_msgs:
built = sys_msgs + built
# keep system_text for UI/debug
st0 = blocks_struct.get("system_text") or ""
st = "\n\n".join([t for t in [st0] + sys_texts if isinstance(t, str) and t.strip()])
return {"messages": built, "system_text": st}
def prompt_fragment(self, pm_struct: Dict[str, Any], node_config: Dict[str, Any]) -> str:
"""
Совместимо с веткой provider=='openai' в построении [[PROMPT]] из [ProviderCallNode.run()](agentui/pipeline/executor.py:3103).
"""
return '"messages": ' + json.dumps(pm_struct.get("messages", []), ensure_ascii=False)

View File

@@ -0,0 +1,32 @@
from __future__ import annotations
from typing import Optional
from agentui.providers.adapters.base import ProviderAdapter, default_base_url_for as _default_base_url_for
from agentui.providers.adapters.openai import OpenAIAdapter
try:
from agentui.providers.adapters.gemini import GeminiAdapter, GeminiImageAdapter
except Exception:
GeminiAdapter = None # type: ignore
GeminiImageAdapter = None # type: ignore
try:
from agentui.providers.adapters.claude import ClaudeAdapter
except Exception:
ClaudeAdapter = None # type: ignore
def get_adapter(provider: str) -> Optional[ProviderAdapter]:
p = (provider or "").strip().lower()
if p == "openai":
return OpenAIAdapter()
if p == "gemini" and GeminiAdapter:
return GeminiAdapter() # type: ignore[operator]
if p == "gemini_image" and GeminiImageAdapter:
return GeminiImageAdapter() # type: ignore[operator]
if p == "claude" and ClaudeAdapter:
return ClaudeAdapter() # type: ignore[operator]
return None
def default_base_url_for(vendor: str) -> Optional[str]:
return _default_base_url_for(vendor)
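
A minimal wiring sketch; the module path agentui/providers/adapters/registry.py is an assumption, since this file view does not show its name:

from agentui.providers.adapters.registry import get_adapter, default_base_url_for

adapter = get_adapter("claude")
if adapter is not None:
    assert adapter.default_base_url() == "https://api.anthropic.com"
    assert adapter.default_endpoint("claude-3-5-sonnet") == "/v1/messages"
assert default_base_url_for("unknown-vendor") is None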