sync: UI animations, select styling, TLS verify flag via proxy second line, brand spacing

This commit is contained in:
2025-09-27 18:46:52 +03:00
parent 135c393eda
commit 2abfbb4b1a
52 changed files with 8029 additions and 1408 deletions

View File

@@ -1,48 +0,0 @@
{
"VAL": 2,
"snapshot": {
"incoming": null,
"params": {},
"model": "gpt-x",
"vendor_format": "openai",
"system": "",
"OUT": {
"n1": {
"vars": {
"VAL": 2
}
},
"n2": {
"result": {
"id": "ret_mock_123",
"object": "chat.completion",
"model": "gpt-x",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "2"
},
"finish_reason": "stop"
}
],
"usage": {
"prompt_tokens": 0,
"completion_tokens": 1,
"total_tokens": 0
}
},
"response_text": "2"
}
},
"OUT_TEXT": {
"n1": "",
"n2": "2"
},
"LAST_NODE": "n2",
"OUT1": "",
"OUT2": "2",
"EXEC_TRACE": "n1(SetVars) -> n2(Return)"
}
}

View File

@@ -1,48 +0,0 @@
{
"TXT": "A | B | C",
"snapshot": {
"incoming": null,
"params": {},
"model": "gpt-x",
"vendor_format": "openai",
"system": "",
"OUT": {
"n1": {
"vars": {
"TXT": "A | B | C"
}
},
"n2": {
"result": {
"id": "ret_mock_123",
"object": "chat.completion",
"model": "gpt-x",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "A | B | C"
},
"finish_reason": "stop"
}
],
"usage": {
"prompt_tokens": 0,
"completion_tokens": 5,
"total_tokens": 0
}
},
"response_text": "A | B | C"
}
},
"OUT_TEXT": {
"n1": "A | B | C",
"n2": "A | B | C"
},
"LAST_NODE": "n2",
"OUT1": "A | B | C",
"OUT2": "A | B | C",
"EXEC_TRACE": "n1(SetVars) -> n2(Return)"
}
}

View File

@@ -0,0 +1,40 @@
{
"WAS_ERROR__n2": true,
"CYCLEINDEX__n2": 2,
"snapshot": {
"incoming": {
"method": "POST",
"url": "http://localhost/test",
"path": "/test",
"query": "",
"headers": {
"x": "X-HEADER"
},
"json": {}
},
"params": {
"temperature": 0.25
},
"model": "gpt-x",
"vendor_format": "openai",
"system": "",
"OUT": {
"n2": {
"result": {
"error": "Node n2 (ProviderCall) requires 'base_url' in config"
},
"response_text": "",
"vars": {
"WAS_ERROR__n2": true,
"CYCLEINDEX__n2": 2
}
}
},
"OUT_TEXT": {
"n2": "Node n2 (ProviderCall) requires 'base_url' in config"
},
"LAST_NODE": "n2",
"OUT2": "Node n2 (ProviderCall) requires 'base_url' in config",
"EXEC_TRACE": "n2(ProviderCall)"
}
}

View File

@@ -0,0 +1,48 @@
{
"MSG": "abc123xyz",
"WAS_ERROR__n2": true,
"CYCLEINDEX__n2": 1,
"snapshot": {
"incoming": {
"method": "POST",
"url": "http://localhost/test",
"path": "/test",
"query": "",
"headers": {
"x": "X-HEADER"
},
"json": {}
},
"params": {
"temperature": 0.25
},
"model": "gpt-x",
"vendor_format": "openai",
"system": "",
"OUT": {
"n1": {
"vars": {
"MSG": "abc123xyz"
}
},
"n2": {
"result": {
"error": "Node n2 (ProviderCall) requires 'base_url' in config"
},
"response_text": "",
"vars": {
"WAS_ERROR__n2": true,
"CYCLEINDEX__n2": 1
}
}
},
"OUT_TEXT": {
"n1": "abc123xyz",
"n2": "Node n2 (ProviderCall) requires 'base_url' in config"
},
"LAST_NODE": "n2",
"OUT1": "abc123xyz",
"OUT2": "Node n2 (ProviderCall) requires 'base_url' in config",
"EXEC_TRACE": "n1(SetVars) -> n2(ProviderCall)"
}
}

View File

@@ -0,0 +1,105 @@
{
"snapshot": {
"incoming": {
"method": "POST",
"url": "http://localhost/test",
"path": "/test",
"query": "",
"headers": {
"x": "X-HEADER"
},
"json": {
"messages": [
{
"role": "system",
"content": "Системный-тест CLAUDE"
},
{
"role": "user",
"content": "Прив"
},
{
"role": "assistant",
"content": "Привет!"
}
]
}
},
"params": {
"temperature": 0.25
},
"model": "gpt-x",
"vendor_format": "openai",
"system": "",
"OUT": {
"n1": {
"result": {
"echo": {
"url": "http://mock.local/v1/messages",
"headers": {
"Content-Type": "application/json"
},
"payload": {
"model": "gpt-x",
"system": [
{
"type": "text",
"text": "Ты — Narrator-chan."
},
{
"type": "text",
"text": "Системный-тест CLAUDE"
}
],
"messages": [
{
"role": "user",
"content": [
{
"type": "text",
"text": "Системный-тест CLAUDE"
}
]
},
{
"role": "user",
"content": [
{
"type": "text",
"text": "Прив"
}
]
},
{
"role": "assistant",
"content": [
{
"type": "text",
"text": "Привет!"
}
]
},
{
"role": "user",
"content": [
{
"type": "text",
"text": "как лела"
}
]
}
]
}
}
},
"response_text": "http://mock.local/v1/messages"
}
},
"OUT_TEXT": {
"n1": "http://mock.local/v1/messages"
},
"LAST_NODE": "n1",
"OUT1": "http://mock.local/v1/messages",
"EXEC_TRACE": "n1(ProviderCall)"
}
}

View File

@@ -0,0 +1,101 @@
{
"snapshot": {
"incoming": {
"method": "POST",
"url": "http://localhost/test",
"path": "/test",
"query": "",
"headers": {
"x": "X-HEADER"
},
"json": {
"messages": [
{
"role": "system",
"content": "Системный-тест из входящего"
},
{
"role": "user",
"content": "Its just me.."
},
{
"role": "assistant",
"content": "Reply from model"
}
]
}
},
"params": {
"temperature": 0.25
},
"model": "gpt-x",
"vendor_format": "openai",
"system": "",
"OUT": {
"n1": {
"result": {
"echo": {
"url": "http://mock.local/v1beta/models/gpt-x:generateContent",
"headers": {
"Content-Type": "application/json"
},
"payload": {
"model": "gpt-x",
"contents": [
{
"role": "user",
"parts": [
{
"text": "Системный-тест из входящего"
}
]
},
{
"role": "user",
"parts": [
{
"text": "Its just me.."
}
]
},
{
"role": "model",
"parts": [
{
"text": "Reply from model"
}
]
},
{
"role": "user",
"parts": [
{
"text": "как лела"
}
]
}
],
"systemInstruction": {
"parts": [
{
"text": "Ты — Narrator-chan."
},
{
"text": "Системный-тест из входящего"
}
]
}
}
}
},
"response_text": "http://mock.local/v1beta/models/gpt-x:generateContent"
}
},
"OUT_TEXT": {
"n1": "http://mock.local/v1beta/models/gpt-x:generateContent"
},
"LAST_NODE": "n1",
"OUT1": "http://mock.local/v1beta/models/gpt-x:generateContent",
"EXEC_TRACE": "n1(ProviderCall)"
}
}

View File

@@ -0,0 +1,79 @@
{
"snapshot": {
"incoming": {
"method": "POST",
"url": "http://localhost/test",
"path": "/test",
"query": "",
"headers": {
"x": "X-HEADER"
},
"json": {
"contents": [
{
"role": "user",
"parts": [
{
"text": "A"
}
]
},
{
"role": "model",
"parts": [
{
"text": "B"
}
]
}
]
}
},
"params": {
"temperature": 0.25
},
"model": "gpt-x",
"vendor_format": "gemini",
"system": "",
"OUT": {
"n1": {
"result": {
"echo": {
"url": "http://mock.local/v1/chat/completions",
"headers": {
"Content-Type": "application/json"
},
"payload": {
"model": "gpt-x",
"messages": [
{
"role": "system",
"content": "Ты — Narrator-chan."
},
{
"role": "user",
"content": "как лела"
},
{
"role": "user",
"content": "A"
},
{
"role": "assistant",
"content": "B"
}
]
}
}
},
"response_text": "http://mock.local/v1/chat/completions"
}
},
"OUT_TEXT": {
"n1": "http://mock.local/v1/chat/completions"
},
"LAST_NODE": "n1",
"OUT1": "http://mock.local/v1/chat/completions",
"EXEC_TRACE": "n1(ProviderCall)"
}
}

View File

@@ -0,0 +1,40 @@
{
"WAS_ERROR__n1": true,
"CYCLEINDEX__n1": 1,
"snapshot": {
"incoming": {
"method": "POST",
"url": "http://example.local/test",
"path": "/test",
"query": "",
"headers": {
"content-type": "text/plain"
},
"json": "raw-plain-body-simulated"
},
"params": {
"temperature": 0.25
},
"model": "gpt-x",
"vendor_format": "openai",
"system": "",
"OUT": {
"n1": {
"result": {
"error": "Node n1 (RawForward): 'base_url' is not configured and vendor could not be detected."
},
"response_text": "",
"vars": {
"WAS_ERROR__n1": true,
"CYCLEINDEX__n1": 1
}
}
},
"OUT_TEXT": {
"n1": "Node n1 (RawForward): 'base_url' is not configured and vendor could not be detected."
},
"LAST_NODE": "n1",
"OUT1": "Node n1 (RawForward): 'base_url' is not configured and vendor could not be detected.",
"EXEC_TRACE": "n1(RawForward)"
}
}

File diff suppressed because one or more lines are too long

View File

@@ -29,13 +29,29 @@
Quick start
Variant A (Windows):
- Open [`run_agentui.bat`](run_agentui.bat): it installs the dependencies and opens the editor by itself.
Variant A (Windows, automatic .venv setup):
- Launch [run_agentui.bat](run_agentui.bat) with a double click or from the console.
- The script will:
- create a local .venv environment in the project directory;
- upgrade pip;
- install the dependencies from [requirements.txt](requirements.txt);
- start the server and open the editor in the browser.
- Environment variables (optional, set before launch): HOST=127.0.0.1 PORT=7860
Variant B (any OS):
- Install Python 3.10+ and run:
- pip install -r [`requirements.txt`](requirements.txt)
- python -m uvicorn agentui.api.server:app --host 127.0.0.1 --port 7860
Variant B (Linux/macOS, automatic .venv setup):
- Make it executable and run:
- chmod +x [run_agentui.sh](run_agentui.sh)
- ./run_agentui.sh
- The script does the same: .venv + dependency install + server start.
Variant C (manual launch, if you want to control each step):
- Install Python 3.10+.
- Create and activate a .venv:
- Windows (cmd): py -m venv .venv && .\.venv\Scripts\activate
- Linux/macOS (bash): python3 -m venv .venv && source .venv/bin/activate
- Install the dependencies and start the server:
- pip install -r [requirements.txt](requirements.txt)
- python -m uvicorn agentui.api.server:app --host 127.0.0.1 --port 7860
Open in the browser:
- http://127.0.0.1:7860/ui/editor.html (the visual node editor)

View File

@@ -1,10 +1,12 @@
from fastapi import FastAPI, Request, HTTPException, Query, Header
import logging
from logging.handlers import RotatingFileHandler
import json
from urllib.parse import urlsplit, urlunsplit, parse_qsl, urlencode, unquote
from fastapi.responses import JSONResponse, HTMLResponse, StreamingResponse
from fastapi.responses import JSONResponse, HTMLResponse, StreamingResponse, FileResponse
from fastapi.staticfiles import StaticFiles
import os
import hashlib
import time
from pydantic import BaseModel, Field
from typing import Any, Dict, List, Literal, Optional
from agentui.pipeline.executor import PipelineExecutor
@@ -12,6 +14,7 @@ from agentui.pipeline.defaults import default_pipeline
from agentui.pipeline.storage import load_pipeline, save_pipeline, list_presets, load_preset, save_preset, load_var_store
from agentui.common.vendors import detect_vendor
from agentui.common.cancel import request_cancel, clear_cancel, is_cancelled
from agentui.pipeline.templating import render_template_simple
class UnifiedParams(BaseModel):
@@ -175,35 +178,7 @@ def build_macro_context(u: UnifiedChatRequest, incoming: Optional[Dict[str, Any]
}
def jinja_render(template: str, ctx: Dict[str, Any]) -> str:
# To avoid pulling Jinja2 into the MVP: a simple {{ key.path }} substitution
def get_value(path: str, data: Dict[str, Any]) -> Any:
cur: Any = data
for part in path.split('.'):
if isinstance(cur, dict):
cur = cur.get(part, "")
else:
return ""
return cur if isinstance(cur, (str, int, float)) else ""
out = template
import re
for m in re.findall(r"\{\{\s*([^}]+)\s*\}\}", template):
expr = m.strip()
# support simple default filter: {{ path|default(value) }}
default_match = re.match(r"([^|]+)\|\s*default\((.*)\)", expr)
if default_match:
path = default_match.group(1).strip()
fallback = default_match.group(2).strip()
# strip quotes if present
if (fallback.startswith("\"") and fallback.endswith("\"")) or (fallback.startswith("'") and fallback.endswith("'")):
fallback = fallback[1:-1]
raw_val = get_value(path, ctx)
val = str(raw_val) if raw_val not in (None, "") else str(fallback)
else:
val = str(get_value(expr, ctx))
out = out.replace("{{ "+m+" }}", val).replace("{{"+m+"}}", val)
return out
# jinja_render removed (duplication). Use agentui.pipeline.templating.render_template_simple instead.
async def execute_pipeline_echo(u: UnifiedChatRequest) -> Dict[str, Any]:
@@ -211,7 +186,7 @@ async def execute_pipeline_echo(u: UnifiedChatRequest) -> Dict[str, Any]:
macro_ctx = build_macro_context(u)
# PromptTemplate
prompt_template = "System: {{ system }}\nUser: {{ chat.last_user }}"
rendered_prompt = jinja_render(prompt_template, macro_ctx)
rendered_prompt = render_template_simple(prompt_template, macro_ctx, {})
# LLMInvoke (echo, since the MVP has no real provider)
llm_response_text = f"[echo by {u.model}]\n" + rendered_prompt
# Append a human-readable pipeline execution trace to the echo (if available)
@@ -274,10 +249,7 @@ def create_app() -> FastAPI:
if not logger.handlers:
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
file_handler = RotatingFileHandler("agentui.log", maxBytes=1_000_000, backupCount=3, encoding="utf-8")
file_handler.setLevel(logging.INFO)
logger.addHandler(stream_handler)
logger.addHandler(file_handler)
# --- Simple in-process SSE hub (subscriptions per browser tab) ---
import asyncio as _asyncio
@@ -362,6 +334,77 @@ def create_app() -> FastAPI:
except Exception: # noqa: BLE001
pass
async def _run_pipeline_for_payload(request: Request, payload: Dict[str, Any], raw: Optional[bytes] = None) -> JSONResponse:
# Unified handler: log the incoming request, normalize it, run PipelineExecutor, fall back to echo, log the response
await _log_request(request, raw_body=raw, parsed=payload)
unified = normalize_to_unified(payload)
unified.stream = False
incoming = {
"method": request.method,
"url": _sanitize_url(str(request.url)),
"path": request.url.path,
"query": request.url.query,
"headers": dict(request.headers),
"json": payload,
}
macro_ctx = build_macro_context(unified, incoming=incoming)
pipeline = load_pipeline()
executor = PipelineExecutor(pipeline)
async def _trace(evt: Dict[str, Any]) -> None:
try:
base = {"pipeline_id": pipeline.get("id", "pipeline_editor")}
await _trace_hub.publish({**base, **evt})
except Exception:
pass
# Diagnostic INFO log to validate the refactoring
try:
logger.info(
"%s",
json.dumps(
{
"event": "unified_handler",
"vendor": unified.vendor_format,
"model": unified.model,
"pipeline_id": pipeline.get("id", "pipeline_editor"),
},
ensure_ascii=False,
),
)
except Exception:
pass
# Mark pipeline start for UI and measure total active time
t0 = time.perf_counter()
try:
await _trace_hub.publish({
"event": "pipeline_start",
"pipeline_id": pipeline.get("id", "pipeline_editor"),
"ts": int(time.time() * 1000),
})
except Exception:
pass
last = await executor.run(macro_ctx, trace=_trace)
result = last.get("result") or await execute_pipeline_echo(unified)
# Mark pipeline end for UI
t1 = time.perf_counter()
try:
await _trace_hub.publish({
"event": "pipeline_done",
"pipeline_id": pipeline.get("id", "pipeline_editor"),
"ts": int(time.time() * 1000),
"duration_ms": int((t1 - t0) * 1000),
})
except Exception:
pass
await _log_response(request, 200, result)
return JSONResponse(result)
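# All vendor-compatible routes below (OpenAI, Gemini, Claude) delegate to this
# helper, each ending with:
#   return await _run_pipeline_for_payload(request, payload, raw)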
@app.get("/")
async def index() -> HTMLResponse:
html = (
@@ -383,33 +426,7 @@ def create_app() -> FastAPI:
payload = json.loads(raw or b"{}")
except Exception: # noqa: BLE001
raise HTTPException(status_code=400, detail="Invalid JSON")
await _log_request(request, raw_body=raw, parsed=payload)
unified = normalize_to_unified(payload)
unified.stream = False  # no streaming, per the MVP requirement
# context for the pipeline
incoming = {
"method": request.method,
"url": _sanitize_url(str(request.url)),
"path": request.url.path,
"query": request.url.query,
"headers": dict(request.headers),
"json": payload,
}
macro_ctx = build_macro_context(unified, incoming=incoming)
pipeline = load_pipeline()
executor = PipelineExecutor(pipeline)
async def _trace(evt: Dict[str, Any]) -> None:
try:
base = {"pipeline_id": pipeline.get("id", "pipeline_editor")}
await _trace_hub.publish({**base, **evt})
except Exception:
pass
last = await executor.run(macro_ctx, trace=_trace)
result = last.get("result") or await execute_pipeline_echo(unified)
await _log_response(request, 200, result)
return JSONResponse(result)
return await _run_pipeline_for_payload(request, payload, raw)
# Google AI Studio compatible routes (Gemini):
# POST /v1beta/models/{model}:generateContent?key=...
@@ -421,34 +438,10 @@ def create_app() -> FastAPI:
payload = json.loads(raw or b"{}")
except Exception: # noqa: BLE001
raise HTTPException(status_code=400, detail="Invalid JSON")
# Make sure the model is present in the payload
if not isinstance(payload, dict):
raise HTTPException(status_code=400, detail="Invalid payload type")
payload = {**payload, "model": model}
await _log_request(request, raw_body=raw, parsed=payload)
unified = normalize_to_unified(payload)
unified.stream = False
incoming = {
"method": request.method,
"url": _sanitize_url(str(request.url)),
"path": request.url.path,
"query": request.url.query,
"headers": dict(request.headers),
"json": payload,
}
macro_ctx = build_macro_context(unified, incoming=incoming)
pipeline = load_pipeline()
executor = PipelineExecutor(pipeline)
async def _trace(evt: Dict[str, Any]) -> None:
try:
base = {"pipeline_id": pipeline.get("id", "pipeline_editor")}
await _trace_hub.publish({**base, **evt})
except Exception:
pass
last = await executor.run(macro_ctx, trace=_trace)
result = last.get("result") or await execute_pipeline_echo(unified)
await _log_response(request, 200, result)
return JSONResponse(result)
return await _run_pipeline_for_payload(request, payload, raw)
@app.post("/v1/models/{model}:generateContent")
async def gemini_generate_content_v1(model: str, request: Request, key: Optional[str] = Query(default=None)) -> JSONResponse: # noqa: ARG001
@@ -460,30 +453,7 @@ def create_app() -> FastAPI:
if not isinstance(payload, dict):
raise HTTPException(status_code=400, detail="Invalid payload type")
payload = {**payload, "model": model}
await _log_request(request, raw_body=raw, parsed=payload)
unified = normalize_to_unified(payload)
unified.stream = False
incoming = {
"method": request.method,
"url": _sanitize_url(str(request.url)),
"path": request.url.path,
"query": request.url.query,
"headers": dict(request.headers),
"json": payload,
}
macro_ctx = build_macro_context(unified, incoming=incoming)
pipeline = load_pipeline()
executor = PipelineExecutor(pipeline)
async def _trace(evt: Dict[str, Any]) -> None:
try:
base = {"pipeline_id": pipeline.get("id", "pipeline_editor")}
await _trace_hub.publish({**base, **evt})
except Exception:
pass
last = await executor.run(macro_ctx, trace=_trace)
result = last.get("result") or await execute_pipeline_echo(unified)
await _log_response(request, 200, result)
return JSONResponse(result)
return await _run_pipeline_for_payload(request, payload, raw)
# Catch-all for cases where the colon in the path is encoded as %3A
@app.post("/v1beta/models/{rest_of_path:path}")
@@ -500,30 +470,7 @@ def create_app() -> FastAPI:
if not isinstance(payload, dict):
raise HTTPException(status_code=400, detail="Invalid payload type")
payload = {**payload, "model": model}
await _log_request(request, raw_body=raw, parsed=payload)
unified = normalize_to_unified(payload)
unified.stream = False
incoming = {
"method": request.method,
"url": _sanitize_url(str(request.url)),
"path": request.url.path,
"query": request.url.query,
"headers": dict(request.headers),
"json": payload,
}
macro_ctx = build_macro_context(unified, incoming=incoming)
pipeline = load_pipeline()
executor = PipelineExecutor(pipeline)
async def _trace(evt: Dict[str, Any]) -> None:
try:
base = {"pipeline_id": pipeline.get("id", "pipeline_editor")}
await _trace_hub.publish({**base, **evt})
except Exception:
pass
last = await executor.run(macro_ctx, trace=_trace)
result = last.get("result") or await execute_pipeline_echo(unified)
await _log_response(request, 200, result)
return JSONResponse(result)
return await _run_pipeline_for_payload(request, payload, raw)
@app.post("/v1/models/{rest_of_path:path}")
async def gemini_generate_content_v1_catchall(rest_of_path: str, request: Request, key: Optional[str] = Query(default=None)) -> JSONResponse: # noqa: ARG001
@@ -539,30 +486,7 @@ def create_app() -> FastAPI:
if not isinstance(payload, dict):
raise HTTPException(status_code=400, detail="Invalid payload type")
payload = {**payload, "model": model}
await _log_request(request, raw_body=raw, parsed=payload)
unified = normalize_to_unified(payload)
unified.stream = False
incoming = {
"method": request.method,
"url": _sanitize_url(str(request.url)),
"path": request.url.path,
"query": request.url.query,
"headers": dict(request.headers),
"json": payload,
}
macro_ctx = build_macro_context(unified, incoming=incoming)
pipeline = load_pipeline()
executor = PipelineExecutor(pipeline)
async def _trace(evt: Dict[str, Any]) -> None:
try:
base = {"pipeline_id": pipeline.get("id", "pipeline_editor")}
await _trace_hub.publish({**base, **evt})
except Exception:
pass
last = await executor.run(macro_ctx, trace=_trace)
result = last.get("result") or await execute_pipeline_echo(unified)
await _log_response(request, 200, result)
return JSONResponse(result)
return await _run_pipeline_for_payload(request, payload, raw)
# Anthropic Claude messages endpoint compatibility
@app.post("/v1/messages")
@@ -574,37 +498,114 @@ def create_app() -> FastAPI:
raise HTTPException(status_code=400, detail="Invalid JSON")
if not isinstance(payload, dict):
raise HTTPException(status_code=400, detail="Invalid payload type")
# Mark as Anthropic; pass the version from the header into the payload for detection
if anthropic_version:
payload = {**payload, "anthropic_version": anthropic_version}
else:
payload = {**payload, "anthropic_version": payload.get("anthropic_version", "2023-06-01")}
await _log_request(request, raw_body=raw, parsed=payload)
unified = normalize_to_unified(payload)
unified.stream = False
incoming = {
"method": request.method,
"url": _sanitize_url(str(request.url)),
"path": request.url.path,
"query": request.url.query,
"headers": dict(request.headers),
"json": payload,
}
macro_ctx = build_macro_context(unified, incoming=incoming)
pipeline = load_pipeline()
executor = PipelineExecutor(pipeline)
async def _trace(evt: Dict[str, Any]) -> None:
try:
base = {"pipeline_id": pipeline.get("id", "pipeline_editor")}
await _trace_hub.publish({**base, **evt})
except Exception:
pass
last = await executor.run(macro_ctx, trace=_trace)
result = last.get("result") or await execute_pipeline_echo(unified)
await _log_response(request, 200, result)
return JSONResponse(result)
return await _run_pipeline_for_payload(request, payload, raw)
app.mount("/ui", StaticFiles(directory="static", html=True), name="ui")
# NOTE: endpoints under /ui/* must not be declared after mounting StaticFiles(/ui),
# because the mount intercepts every path under /ui. We use the separate /ui_version path.
@app.get("/ui_version")
async def ui_version() -> JSONResponse:
try:
import time
static_dir = os.path.abspath("static")
editor_path = os.path.join(static_dir, "editor.html")
js_ser_path = os.path.join(static_dir, "js", "serialization.js")
js_pm_path = os.path.join(static_dir, "js", "pm-ui.js")
def md5p(p: str):
try:
with open(p, "rb") as f:
return hashlib.md5(f.read()).hexdigest()
except Exception:
return None
payload = {
"cwd": os.path.abspath("."),
"static_dir": static_dir,
"files": {
"editor.html": md5p(editor_path),
"js/serialization.js": md5p(js_ser_path),
"js/pm-ui.js": md5p(js_pm_path),
},
"ts": int(time.time()),
}
return JSONResponse(payload, headers={"Cache-Control": "no-store"})
except Exception as e:
return JSONResponse({"error": str(e)}, status_code=500, headers={"Cache-Control": "no-store"})
# --- Favicon and PWA icons at root -----------------------------------------
FAV_DIR = "favicon_io_saya"
@app.get("/favicon.ico")
async def _favicon_ico():
p = f"{FAV_DIR}/favicon.ico"
try:
return FileResponse(p, media_type="image/x-icon")
except Exception:
raise HTTPException(status_code=404, detail="favicon not found")
@app.get("/apple-touch-icon.png")
async def _apple_touch_icon():
p = f"{FAV_DIR}/apple-touch-icon.png"
try:
return FileResponse(p, media_type="image/png")
except Exception:
raise HTTPException(status_code=404, detail="apple-touch-icon not found")
@app.get("/favicon-32x32.png")
async def _favicon_32():
p = f"{FAV_DIR}/favicon-32x32.png"
try:
return FileResponse(p, media_type="image/png")
except Exception:
raise HTTPException(status_code=404, detail="favicon-32x32 not found")
@app.get("/favicon-16x16.png")
async def _favicon_16():
p = f"{FAV_DIR}/favicon-16x16.png"
try:
return FileResponse(p, media_type="image/png")
except Exception:
raise HTTPException(status_code=404, detail="favicon-16x16 not found")
@app.get("/android-chrome-192x192.png")
async def _android_192():
p = f"{FAV_DIR}/android-chrome-192x192.png"
try:
return FileResponse(p, media_type="image/png")
except Exception:
raise HTTPException(status_code=404, detail="android-chrome-192x192 not found")
@app.get("/android-chrome-512x512.png")
async def _android_512():
p = f"{FAV_DIR}/android-chrome-512x512.png"
try:
return FileResponse(p, media_type="image/png")
except Exception:
raise HTTPException(status_code=404, detail="android-chrome-512x512 not found")
@app.get("/site.webmanifest")
async def _site_manifest():
p = f"{FAV_DIR}/site.webmanifest"
try:
return FileResponse(p, media_type="application/manifest+json")
except Exception:
raise HTTPException(status_code=404, detail="site.webmanifest not found")
# Custom APNG favicon for "busy" state in UI
@app.get("/saya1.png")
async def _apng_busy_icon():
p = f"{FAV_DIR}/saya1.png"
try:
# APNG served as image/png is acceptable for browsers
return FileResponse(p, media_type="image/png")
except Exception:
raise HTTPException(status_code=404, detail="saya1.png not found")
# Variable store API (per-pipeline)
@app.get("/admin/vars")
async def get_vars() -> JSONResponse:
@@ -640,7 +641,37 @@ def create_app() -> FastAPI:
# Admin API for the pipeline
@app.get("/admin/pipeline")
async def get_pipeline() -> JSONResponse:
return JSONResponse(load_pipeline())
p = load_pipeline()
# Diagnostic log of the meta keys (to confirm the DRY refactoring)
try:
meta_keys = [
"id","name","parallel_limit","loop_mode","loop_max_iters","loop_time_budget_ms","clear_var_store",
"http_timeout_sec","text_extract_strategy","text_extract_json_path","text_join_sep","text_extract_presets"
]
present = [k for k in meta_keys if k in p]
meta_preview = {k: p.get(k) for k in present if k != "text_extract_presets"}
presets_count = 0
try:
presets = p.get("text_extract_presets")
if isinstance(presets, list):
presets_count = len(presets)
except Exception:
presets_count = 0
logger.info(
"%s",
json.dumps(
{
"event": "admin_get_pipeline_meta",
"keys": present,
"presets_count": presets_count,
"meta_preview": meta_preview,
},
ensure_ascii=False,
),
)
except Exception:
pass
return JSONResponse(p)
@app.post("/admin/pipeline")
async def set_pipeline(request: Request) -> JSONResponse:
@@ -652,6 +683,37 @@ def create_app() -> FastAPI:
# simple validation
if not isinstance(pipeline, dict) or "nodes" not in pipeline:
raise HTTPException(status_code=400, detail="Invalid pipeline format")
# Diagnostic log of the incoming meta keys before saving
try:
meta_keys = [
"id","name","parallel_limit","loop_mode","loop_max_iters","loop_time_budget_ms","clear_var_store",
"http_timeout_sec","text_extract_strategy","text_extract_json_path","text_join_sep","text_extract_presets"
]
present = [k for k in meta_keys if k in pipeline]
meta_preview = {k: pipeline.get(k) for k in present if k != "text_extract_presets"}
presets_count = 0
try:
presets = pipeline.get("text_extract_presets")
if isinstance(presets, list):
presets_count = len(presets)
except Exception:
presets_count = 0
logger.info(
"%s",
json.dumps(
{
"event": "admin_set_pipeline_meta",
"keys": present,
"presets_count": presets_count,
"meta_preview": meta_preview,
},
ensure_ascii=False,
),
)
except Exception:
pass
save_pipeline(pipeline)
return JSONResponse({"ok": True})

View File

@@ -86,6 +86,41 @@ def _read_kv_from_proxy_file() -> Dict[str, str]:
return out
return out
def _read_second_bare_flag_from_proxy() -> Optional[bool]:
"""
Reads the "second bare line" after the URL in proxy.txt and interprets it as the verify flag:
true/1/yes/on -> True
false/0/no/off -> False
Returns None if the line is missing or not recognized.
"""
try:
p = Path("proxy.txt")
if not p.exists():
return None
lines = [ln.strip() for ln in p.read_text(encoding="utf-8").splitlines()]
# find the first "URL" line (not empty, no '=', not a comment)
idx_url = -1
for i, ln in enumerate(lines):
if not ln or ln.startswith("#") or "=" in ln:
continue
idx_url = i
break
if idx_url >= 0:
# look for the next bare line
for j in range(idx_url + 1, len(lines)):
ln = lines[j].strip()
if not ln or ln.startswith("#") or "=" in ln:
continue
low = ln.lower()
if low in ("1", "true", "yes", "on"):
return True
if low in ("0", "false", "no", "off"):
return False
# if it does not look like a flag, treat it as absent
break
except Exception:
return None
return None
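# Illustration (hypothetical file contents): a proxy.txt laid out the way this
# parser expects it, with the proxy URL on the first bare line and the verify
# flag on the second:
#
#     # comments and key=value lines are skipped
#     socks5://127.0.0.1:1080
#     false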
def get_tls_verify() -> Union[bool, str]:
"""
Returns the value for the httpx.AsyncClient(verify=...) parameter:
@@ -119,31 +154,11 @@ def get_tls_verify() -> Union[bool, str]:
if path.exists():
return str(path)
# 2.1) Additionally: support a second key-less line (true/false)
try:
p = Path("proxy.txt")
if p.exists():
lines = [ln.strip() for ln in p.read_text(encoding="utf-8").splitlines()]
# find the first "URL" line (not empty, no '=', not a comment)
idx_url = -1
for i, ln in enumerate(lines):
if not ln or ln.startswith("#") or "=" in ln:
continue
idx_url = i
break
if idx_url >= 0:
# look for the next bare line
for j in range(idx_url + 1, len(lines)):
ln = lines[j].strip()
if not ln or ln.startswith("#") or "=" in ln:
continue
low = ln.lower()
if low in ("1", "true", "yes", "on"):
return True
if low in ("0", "false", "no", "off"):
return False
# if it does not look like a flag, ignore it and keep going
except Exception:
pass
second = _read_second_bare_flag_from_proxy()
if second is True:
return True
if second is False:
return False
# 3) Default file in the project root
default_ca = Path("proxy-ca.pem")
@@ -173,26 +188,9 @@ def is_verify_explicit() -> bool:
if "verify" in kv or "ca" in kv:
return True
# The second bare line counts as an explicit flag
try:
p = Path("proxy.txt")
if p.exists():
lines = [ln.strip() for ln in p.read_text(encoding="utf-8").splitlines()]
idx_url = -1
for i, ln in enumerate(lines):
if not ln or ln.startswith("#") or "=" in ln:
continue
idx_url = i
break
if idx_url >= 0:
for j in range(idx_url + 1, len(lines)):
ln = lines[j].strip()
if not ln or ln.startswith("#") or "=" in ln:
continue
if ln.lower() in ("1", "0", "true", "false", "yes", "no", "on", "off"):
return True
break
except Exception:
pass
second = _read_second_bare_flag_from_proxy()
if second is not None:
return True
if Path("proxy-ca.pem").exists():
return True

File diff suppressed because it is too large

View File

@@ -11,17 +11,93 @@ PRESETS_DIR = Path("presets")
VARS_DIR = Path(".agentui") / "vars"
# DRY pipeline/meta normalization: a single source of defaults and types
def normalize_pipeline(pipeline: Dict[str, Any]) -> Dict[str, Any]:
"""
Brings the pipeline's top-level keys into a consistent shape and fills in defaults.
Tolerant of missing keys and wrong types.
"""
if not isinstance(pipeline, dict):
pipeline = {}
out: Dict[str, Any] = dict(pipeline)
def _to_int(v, d):
try:
n = int(v)
return n if n > 0 else d
except Exception:
return d
def _to_float(v, d):
try:
n = float(v)
return n if n > 0 else d
except Exception:
return d
# Base fields
out["id"] = str(out.get("id") or "pipeline_editor")
out["name"] = str(out.get("name") or "Edited Pipeline")
out["parallel_limit"] = _to_int(out.get("parallel_limit"), 8)
out["loop_mode"] = str(out.get("loop_mode") or "dag")
out["loop_max_iters"] = _to_int(out.get("loop_max_iters"), 1000)
out["loop_time_budget_ms"] = _to_int(out.get("loop_time_budget_ms"), 10000)
out["clear_var_store"] = bool(out.get("clear_var_store", True))
out["http_timeout_sec"] = _to_float(out.get("http_timeout_sec"), 60)
# Global text-extraction options for [[OUTx]]
out["text_extract_strategy"] = str(out.get("text_extract_strategy") or "auto")
out["text_extract_json_path"] = str(out.get("text_extract_json_path") or "")
# Accept alternate casings of the text_join_sep key
join_sep = out.get("text_join_sep")
if join_sep is None:
for k in list(out.keys()):
if isinstance(k, str) and k.lower() == "text_join_sep":
join_sep = out.get(k)
break
out["text_join_sep"] = str(join_sep or "\n")
# Parsing presets
presets = out.get("text_extract_presets")
norm_presets: List[Dict[str, Any]] = []
if isinstance(presets, list):
for i, it in enumerate(presets):
if not isinstance(it, dict):
continue
norm_presets.append({
"id": str(it.get("id") or f"p{i}"),
"name": str(it.get("name") or it.get("json_path") or "Preset"),
"strategy": str(it.get("strategy") or "auto"),
"json_path": str(it.get("json_path") or ""),
"join_sep": str(it.get("join_sep") or "\n"),
})
out["text_extract_presets"] = norm_presets
# Nodes: must be a list
try:
nodes = out.get("nodes") or []
if not isinstance(nodes, list):
nodes = []
out["nodes"] = nodes
except Exception:
out["nodes"] = []
return out
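# Illustrative call: normalize_pipeline({"parallel_limit": "16"}) returns the
# full shape with coerced types and defaults filled in, e.g. parallel_limit=16,
# loop_mode="dag", text_join_sep="\n", text_extract_presets=[], nodes=[].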
def load_pipeline() -> Dict[str, Any]:
if PIPELINE_FILE.exists():
try:
return json.loads(PIPELINE_FILE.read_text(encoding="utf-8"))
except Exception:
pass
return default_pipeline()
if PIPELINE_FILE.exists():
try:
data = json.loads(PIPELINE_FILE.read_text(encoding="utf-8"))
return normalize_pipeline(data)
except Exception:
pass
return normalize_pipeline(default_pipeline())
def save_pipeline(pipeline: Dict[str, Any]) -> None:
PIPELINE_FILE.write_text(json.dumps(pipeline, ensure_ascii=False, indent=2), encoding="utf-8")
norm = normalize_pipeline(pipeline or {})
PIPELINE_FILE.write_text(json.dumps(norm, ensure_ascii=False, indent=2), encoding="utf-8")
def list_presets() -> List[str]:

View File

@@ -35,6 +35,11 @@ _BARE_MACRO_RE = re.compile(r"\[\[\s*([A-Za-z_][A-Za-z0-9_]*(?:\.[^\]]+?)?)\s*\]
# Parse the expression up to the nearest '}}', allowing '}' inside (e.g. in JSON literals)
_BRACES_RE = re.compile(r"\{\{\s*(.*?)\s*\}\}", re.DOTALL)
# Shorthand syntax: img(mime?)[[...]] → data:<mime>;base64,<resolved_inner_macro>
# Example: img()[[OUT1]] → data:image/png;base64,{{resolved OUT1}}
# img(jpeg)[[OUT:n1.result...]] → data:image/jpeg;base64,{{resolved}}
_IMG_WRAPPER_RE = re.compile(r"(?is)img\(\s*([^)]+?)?\s*\)\s*\[\[\s*(.+?)\s*\]\]")
def _split_path(path: str) -> List[str]:
return [p.strip() for p in str(path).split(".") if str(p).strip()]
@@ -164,12 +169,21 @@ def _best_text_from_outputs(node_out: Any) -> str:
# Gemini
try:
if isinstance(base, dict):
cand0 = (base.get("candidates") or [{}])[0]
content = cand0.get("content") or {}
parts0 = (content.get("parts") or [{}])[0]
t = parts0.get("text")
if isinstance(t, str):
return t
cands = base.get("candidates") or []
texts: List[str] = []
for cand in cands:
try:
content = cand.get("content") or {}
parts = content.get("parts") or []
for p in parts:
if isinstance(p, dict):
t = p.get("text")
if isinstance(t, str) and t.strip():
texts.append(t.strip())
except Exception:
continue
if texts:
return "\n".join(texts)
except Exception:
pass
@@ -203,6 +217,47 @@ def render_template_simple(template: str, context: Dict[str, Any], out_map: Dict
return ""
s = str(template)
# 0) Shorthand syntax: img(mime?)[[...]] → data:<mime>;base64,<resolved>
# Runs before expanding regular [[...]] macros, so any square-bracket macro can be used inside.
def _normalize_mime(m: str) -> str:
mm = (m or "").strip().lower()
if not mm:
return "image/png"
if "/" in mm:
return mm
return {
"png": "image/png",
"jpg": "image/jpeg",
"jpeg": "image/jpeg",
"webp": "image/webp",
"gif": "image/gif",
"svg": "image/svg+xml",
"bmp": "image/bmp",
"tif": "image/tiff",
"tiff": "image/tiff",
}.get(mm, mm)
def _repl_imgwrap(m: re.Match) -> str:
mime_raw = m.group(1) or ""
inner = m.group(2) or ""
mime = _normalize_mime(mime_raw)
try:
val = _resolve_square_macro_value(inner, context, out_map)
except Exception:
val = ""
if isinstance(val, (dict, list, bool)) or val is None:
val = _stringify_for_template(val)
else:
val = str(val)
return f"data:{mime};base64,{val}"
# Support multiple occurrences: repeat until exhausted (to handle cascading macros)
while True:
ns, cnt = _IMG_WRAPPER_RE.subn(_repl_imgwrap, s)
s = ns
if cnt == 0:
break
# 1) Macros [[VAR:...]] / [[OUT:...]] / [[STORE:...]]
def repl_var(m: re.Match) -> str:
path = m.group(1).strip()
@@ -539,8 +594,18 @@ def _tokenize_condition_expr(expr: str, context: Dict[str, Any], out_map: Dict[s
while j < n and (expr[j].isalnum() or expr[j] in "._"):
j += 1
word = expr[i:j]
# Word-form logical operators are not supported (use &&, ||, !)
tokens.append(word)
# Support bare identifiers from vars: cycleindex, WAS_ERROR, etc.
# If it is a plain identifier (no dots) present in context.vars, bind it to its value
# (e.g. "CYCLEINDEX__n2 >= 2" binds CYCLEINDEX__n2 from vars).
try:
vmap = context.get("vars") or {}
except Exception:
vmap = {}
if re.fullmatch(r"[A-Za-z_][A-Za-z0-9_]*", word) and isinstance(vmap, dict) and word in vmap:
name = add_binding(vmap.get(word))
tokens.append(name)
else:
# Word-form logical operators are not supported (use &&, ||, !)
tokens.append(word)
i = j
continue

View File

@@ -33,12 +33,25 @@ def build_client(timeout: float = 60.0) -> httpx.AsyncClient:
print("[agentui.http_client] proxies=", masked, " verify=", verify)
# httpx understands socks:// and socks5:// schemes natively when the [socks] extra is installed
client = httpx.AsyncClient(
timeout=timeout,
proxies=proxies,
follow_redirects=True,
verify=verify,
)
try:
client = httpx.AsyncClient(
timeout=timeout,
proxies=proxies,
follow_redirects=True,
verify=verify,
)
except TypeError:
if proxies:
try:
masked = {k: _mask_proxy(v) for k, v in proxies.items()}
except Exception:
masked = proxies
print(f"[agentui.http_client] WARNING: proxies not supported in httpx.AsyncClient, skipping proxies={masked}")
client = httpx.AsyncClient(
timeout=timeout,
follow_redirects=True,
verify=verify,
)
return client
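# Note: recent httpx releases dropped the `proxies=` keyword (replaced by
# `proxy=` / `mounts=`); the TypeError fallback above keeps build_client()
# usable on those versions, just without proxy support.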

View File

@@ -6,9 +6,9 @@
Files where this magic lives:
- The server and endpoints: [agentui/api/server.py](agentui/api/server.py)
- The node executor (the heart of the pipeline): [PipelineExecutor.run()](agentui/pipeline/executor.py:170)
- The nodes: [ProviderCallNode.run()](agentui/pipeline/executor.py:1631), [RawForwardNode.run()](agentui/pipeline/executor.py:1939), [ReturnNode.run()](agentui/pipeline/executor.py:2256), [IfNode.run()](agentui/pipeline/executor.py:2350), SetVars in the same file
- The templating engine ([[...]] and {{ ... }} substitutions): [render_template_simple()](agentui/pipeline/templating.py:191), boolean If expressions: [eval_condition_expr()](agentui/pipeline/templating.py:336)
- The node executor (the heart of the pipeline): [PipelineExecutor.run()](agentui/pipeline/executor.py:316)
- The nodes: [ProviderCallNode.run()](agentui/pipeline/executor.py:2007), [RawForwardNode.run()](agentui/pipeline/executor.py:2477), [ReturnNode.run()](agentui/pipeline/executor.py:2798), [IfNode.run()](agentui/pipeline/executor.py:2892), SetVars in the same file
- The templating engine ([[...]] and {{ ... }} substitutions): [render_template_simple()](agentui/pipeline/templating.py:196), boolean If expressions: [eval_condition_expr()](agentui/pipeline/templating.py:382)
— — —
@@ -218,10 +218,10 @@ Return will wrap the text into the proper structure itself
When a node (ProviderCall or RawForward) has received JSON from the provider, the engine tries to pull convenient text out of it:
- OpenAI: choices[0].message.content
- Gemini: candidates[0].content.parts[0].text
- Gemini: all parts[].text across all candidates are joined with "\n" (empty/whitespace-only parts are ignored)
- Claude: content[].text (concatenated)
- If the format is unknown, a best-effort depth search runs to find the text
- A preset (JSONPath) can be set in the node settings or globally under "Run" → "OUTx parsing presets"
- A preset (JSONPath) can be set in the node settings or globally under "Run" → "OUTx parsing presets"; for Gemini an alternative to auto: json_path="candidates.*.content.parts.*.text", join_sep="\n"
The extraction and preset logic lives in: [ProviderCallNode.run()](agentui/pipeline/executor.py:1631), [RawForwardNode.run()](agentui/pipeline/executor.py:1939)
@@ -275,12 +275,12 @@ Return will wrap the text into the proper structure itself
- App/route creation: [create_app()](agentui/api/server.py:270)
- Payload normalization (OpenAI/Gemini/Claude → unified form): [normalize_to_unified()](agentui/api/server.py:44)
- The macro context (incoming/chat/params/…): [build_macro_context()](agentui/api/server.py:143)
- The pipeline executor and its "waves": [PipelineExecutor.run()](agentui/pipeline/executor.py:170)
- The provider call node ([[PROMPT]] assembly, HTTP logging): [ProviderCallNode.run()](agentui/pipeline/executor.py:1631)
- Direct request forwarding: [RawForwardNode.run()](agentui/pipeline/executor.py:1939)
- Response finalization to the client's format: [ReturnNode.run()](agentui/pipeline/executor.py:2256)
- If conditions (contains, &&, ||, ! and macros): [IfNode.run()](agentui/pipeline/executor.py:2350), [eval_condition_expr()](agentui/pipeline/templating.py:336)
- The [[...]] and {{ ... }} macro templating engine: [render_template_simple()](agentui/pipeline/templating.py:191)
- The pipeline executor and its "waves": [PipelineExecutor.run()](agentui/pipeline/executor.py:316)
- The provider call node ([[PROMPT]] assembly, HTTP logging): [ProviderCallNode.run()](agentui/pipeline/executor.py:2007)
- Direct request forwarding: [RawForwardNode.run()](agentui/pipeline/executor.py:2477)
- Response finalization to the client's format: [ReturnNode.run()](agentui/pipeline/executor.py:2798)
- If conditions (contains, &&, ||, ! and macros): [IfNode.run()](agentui/pipeline/executor.py:2892), [eval_condition_expr()](agentui/pipeline/templating.py:382)
- The [[...]] and {{ ... }} macro templating engine: [render_template_simple()](agentui/pipeline/templating.py:196)
- Provider detection from the JSON shape: [detect_vendor()](agentui/common/vendors.py:8)
— — —
@@ -431,4 +431,266 @@ Return will wrap the text into the proper structure itself
Where to see the effective values
- In the editor press "VARIABLES": it shows STORE (including the OUT_TEXT snapshot and the OUT1/OUT2 aliases). Clicking a row copies the macro for pasting.
How the panel works is described in [static/editor.html](static/editor.html) and the scripts [static/js/serialization.js](static/js/serialization.js), [static/js/pm-ui.js](static/js/pm-ui.js).
10) Working with images (ProviderCall + variables)
Overview
- You can now:
- store images in SetVars variables as data URLs;
- insert images into Prompt Blocks via Markdown notation;
- call the OpenAI / Claude / Gemini providers with multimodal messages.
How to store images in variables
- New safe functions are available in SetVars (mode=expr):
- file_b64(path): reads a file and returns a base64 string (no prefix).
- data_url(b64, mime): assembles a data URL: data:mime;base64,&lt;b64&gt;.
- file_data_url(path, mime?): a wrapper that reads the file, detects the mime from the extension (when not given) and returns a full data URL.
- SetVars examples:
- name: IMG1
mode: expr
value: file_data_url('static/samples/cat.png','image/png')
- name: IMG2
mode: expr
value: data_url(file_b64('static/samples/dog.jpg'), 'image/jpeg')
- After that, [[IMG1]] / [[IMG2]] return a full data URL string, ready for multimodal LLMs.
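A rough sketch of what a helper like file_data_url could do under the hood (illustrative only; mime detection via Python's mimetypes is an assumption, the project's actual implementation lives in SetVarsNode._safe_eval_expr()):
    import base64, mimetypes
    def file_data_url(path, mime=None):
        # guess the MIME type from the file extension when not given explicitly
        mime = mime or mimetypes.guess_type(path)[0] or "application/octet-stream"
        b64 = base64.b64encode(open(path, "rb").read()).decode("ascii")
        return f"data:{mime};base64,{b64}"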
How to insert pictures into Prompt Blocks
- Inside a block (system/user/assistant) use Markdown notation:
- ![alt]([[IMG1]])
- ![alt](https://example.org/pic.png)
- ![alt](data:image/webp;base64,....)
- During ProviderCall execution the block is turned into a list of parts:
- text segments → {"type":"text","text":"..."}
- images → {"type":"image_url","url":"..."}
- For plain text nothing changes (backward compatibility is fully preserved).
Provider mapping (a minimal sketch of the OpenAI case follows this list)
- OpenAI (chat.completions):
- message.content → either a string or an array of parts:
- {"type":"text","text":"..."}
- {"type":"image_url","image_url":{"url":"<http(s) or data:...>"}}
- Claude (messages, version 2023-06-01):
- message.content: an array of blocks:
- {"type":"text","text":"..."}
- {"type":"image","source":{"type":"url","url":"..."}} for http(s)
- {"type":"image","source":{"type":"base64","media_type":"image/png","data":"..."}} for data URLs
- Gemini (generateContent):
- contents[].parts: a mix of:
- {"text":"..."}
- {"inline_data":{"mime_type":"image/png","data":"&lt;base64&gt;"}} for data URLs
- http/https URLs are not inlined directly; either convert them to a data URL or implement external fetching (not required for basic support).
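A minimal sketch of the OpenAI-side mapping described above (illustrative; the real conversion lives in agentui/pipeline/executor.py):
    def to_openai_content(parts):
        out = []
        for p in parts:
            if p["type"] == "text":
                out.append({"type": "text", "text": p["text"]})
            else:
                # image part: both http(s) URLs and data URLs go into image_url
                out.append({"type": "image_url", "image_url": {"url": p["url"]}})
        return out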
Mini recipe
1) SetVars:
- name: IMG
- mode: expr
- value: file_data_url('static/pictures/cat.png', 'image/png')
2) ProviderCall (OpenAI/Gemini/Claude), Prompt Blocks, user:
- Briefly describe the picture below.
- ![cat]([[IMG]])
Notes
- No trimming of large strings or base64 is applied by default (by request). Keep images to a reasonable size.
- The ProviderCall JSON body stays valid JSON after [[PROMPT]] expansion, because the conversion into "parts" happens before the payload is built.
- OpenAI and Claude accept both http/https URLs and data URLs. For Gemini, data URLs (inline_data) are preferred.
Where it is implemented
- Backend block conversion and per-provider mapping: see agentui/pipeline/executor.py.
- The new expr functions for SetVars: see the same file, inside SetVarsNode._safe_eval_expr().
- The templating engine, as before, handles expanding [[...]] / {{ ... }}: see agentui/pipeline/templating.py.
10.1) Super-short notation for images: img(mime)[[...]]
The task
- You have a base64 string (without the data: prefix), e.g. you extracted it with a JSONPath preset: candidates.0.content.parts.1.inlineData.data.
- You want the FULL data URL without juggling expr and SetVars functions, right inside an ordinary template string.
The solution (new syntax)
- Write: img(mime)[[MACRO_WITH_BASE64]]
- The output is the string: data:&lt;mime&gt;;base64,&lt;your_base64&gt;
Where it works
- Everywhere the templating engine [render_template_simple()](agentui/pipeline/templating.py:191) is used:
- Return.text_template
- ProviderCall.template / headers / endpoint
- RawForward.extra_headers / override_path
- SetVars (mode=string)
- Implemented in the template preprocessor: [templating.py](agentui/pipeline/templating.py)
Syntax in detail
1) Base case (image/png by default):
- img()[[OUT1]]
→ data:image/png;base64,[[OUT1]] (after [[OUT1]] is expanded, you get a full data URL)
2) Explicit type via a short name:
- img(png)[[...]] → image/png
- img(jpeg)[[...]] → image/jpeg (alias: jpg → image/jpeg)
- img(webp)[[...]] → image/webp
- img(gif)[[...]] → image/gif
- img(svg)[[...]] → image/svg+xml
- img(bmp)[[...]] → image/bmp
- img(tif)[[...]] / img(tiff)[[...]] → image/tiff
3) Full MIME:
- img(image/heic)[[...]] → data:image/heic;base64,...
4) Dynamic MIME via curly braces:
- img({{ OUT.n1.result.candidates.0.content.parts.1.inlineData.mimeType|default('image/png') }})[[OUT1]]
- Important: the MIME inside the parentheses may be given with curly {{ ... }}; after the string is assembled, the preprocessor leaves "data:{{ ... }};base64,...", and the next templating pass substitutes the real value.
Step-by-step examples (copy and paste)
A. Return an image as a data URL string from Return, without SetVars or expr
- You have a JSONPath preset for the base64: candidates.0.content.parts.1.inlineData.data.
- In Return.text_template write:
img(png)[[OUT1]]
or, if [[OUT1]] is not the base64 and you need to pull a specific field:
img(png)[[VAR:OUT.n1.result.candidates.0.content.parts.1.inlineData.data]]
- As a result Return outputs the text:
data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAA...
- This is convenient when the consumer can work with data URL strings.
B. Build an HTML &lt;img&gt; in Return right in the template
- In Return.text_template:
&lt;img src="img(jpeg)[[VAR:OUT.n1.result.candidates.0.content.parts.1.inlineData.data]]" alt="preview"&gt;
- The output is complete HTML with src="data:image/jpeg;base64,...".
C. Put a data URL into a variable without expr
- SetVars → variable IMG (mode=string):
value: img()[[VAR:OUT.n1.result.candidates.0.content.parts.1.inlineData.data]]
- From then on [[IMG]] is a ready-made data URL (paste it anywhere).
D. Dynamic MIME from the provider response
- If the JSON contains mimeType:
img({{ OUT.n1.result.candidates.0.content.parts.1.inlineData.mimeType|default('image/png') }})[[VAR:OUT.n1.result.candidates.0.content.parts.1.inlineData.data]]
Frequently asked questions
Q1: How does this differ from the "old" way via SetVars + data_url()?
- The old way (via expr) remains, and is handy when you need to reuse the value in several places or add extra logic.
- The new one is an instant one-line substitution right in the template, without expr or functions. It is faster for the typical "base64 → data URL" task.
Q2: What happens if [[...]] returns something other than a string?
- The templating engine coerces the value to a string (dict/list are serialized to JSON). But a data URL expects a base64 string specifically. Make sure the path/macro yields a string, not an object.
Q3: Can [[...]] be used inside the img(...) parentheses?
- No; inside the parentheses use {{ ... }} (curly) instead. The mimeType example above shows the right way. Square [[...]] inside the parentheses is not parsed by this preprocessor, while {{ ... }} is happily substituted at the next step.
Q4: Do I have to append data:...;base64, by hand?
- No. That is exactly what the img(mime)[[...]] syntax is for. You specify only the MIME (or leave it empty), and the templating engine adds the prefix itself.
Q5: What if I need a rendered preview rather than the data URL text?
- The "LOGS → Data" panel already shows image mini-previews for the HTTP responses of ProviderCall and RawForward nodes, independently of OUT1.
- If you want a preview right in Return, use HTML: &lt;img src="img(png)[[...]]"&gt;; a consumer capable of rendering HTML will see the picture.
Diagnostics and debugging
- If you see a raw base64 string instead of a data URL, check that you are using the new img(...)[[...]] syntax or have explicitly appended the prefix by hand.
- If no picture is visible in Return, make sure the recipient can render data URLs directly. For HTML recipients use the &lt;img&gt; tag.
- For Gemini responses the "Data" log panel shows previews (we feed full images over SSE), while the JSON structure in "Response" stays readable: only the base64 is trimmed there, without breaking the rest of the structure.
Where it is implemented in code
- The img(mime)[[...]] preprocessor was added to [render_template_simple()](agentui/pipeline/templating.py:191); the regular expression and handler live in [templating.py](agentui/pipeline/templating.py).
- The general expansion logic for [[...]] and {{ ... }} is also in [render_template_simple()](agentui/pipeline/templating.py:191).
- Highlighting and previews in the logs: see [static/editor.html](static/editor.html).
Quick cheat sheet (copy into the right spots of your templates)
- PNG by default:
img()[[OUT1]]
- Explicit JPEG:
img(jpeg)[[VAR:OUT.n1.result.candidates.0.content.parts.1.inlineData.data]]
- Dynamic MIME:
img({{ OUT.n1.result.candidates.0.content.parts.1.inlineData.mimeType|default('image/png') }})[[OUT1]]
- HTML preview:
&lt;img src="img(png)[[OUT1]]" alt="image"&gt;
— — —
Appendix B. What changed in Gemini parsing and [[OUTx]] (2025-09-21)
In short
- What changed: text from Gemini responses is now assembled from ALL parts[].text entries across all candidates; empty/whitespace-only strings are ignored, and the result is joined with "\n".
- Where it is implemented:
- The best-effort algorithm for [[OUTx]]/aliases: [\_best_text_from_outputs()](agentui/pipeline/templating.py:133)
- The explicit "gemini" strategy for nodes: [\_extract_text_for_out()](agentui/pipeline/executor.py:1590)
- What it gives you: [[OUT1]]/[[OUT2]]… and {{ OUT.nX.response_text }} no longer come out empty when the first part of the response is "\n". The entire human-readable text is returned.
Before and after
- Before: only the first element parts[0].text was taken → if it held "\n", you got nothing.
- After: we walk all parts, take the non-empty .text values (stripped) and join them. If there are no parts, the old "deep text search" fallback still applies.
Examples
- A Gemini response (simplified):
{
"candidates": [{
"content": { "parts": [{ "text": "\n" }, { "text": "Useful answer" }] }
}]}
- Previously [[OUT…]] → "" (or "\n")
- Now [[OUT…]] → "Useful answer"
Alternative via a preset/JSONPath (no code changes)
- If you want explicit control over text extraction:
- strategy="jsonpath"
- json_path="candidates.*.content.parts.*.text"
- join_sep="\n"
- This is equivalent to the new auto logic for Gemini and suits cases where strict predictability is needed.
Hint: RawForward and auto
- For RawForward we sometimes rely on the provider "hint" (auto). For now that is enough. If desired, auto-detection for Gemini responses could be extended by the "candidates" key in [detect_vendor()](agentui/common/vendors.py:8), but that is not required for the current [[OUTx]] logic.
— — —
Appendix C. How to write If conditions correctly (cheat sheet)
Where it runs
- The If node evaluates a boolean expression and opens the true/false branch. Execution: [IfNode.run()](agentui/pipeline/executor.py:2892)
- Expression parsing/evaluation: [eval_condition_expr()](agentui/pipeline/templating.py:382)
What is supported
- Operators:
- Logic: &&, ||, ! (use "!" instead of "not")
- Comparisons: ==, !=, <, <=, >, >=
- Keyword: contains (substring for strings or membership for lists)
- Parentheses: (...)
- Macros inside the expression:
- [[OUT1]], [[OUT:n2.result...]], [[NAME]]
- {{ OUT.nX.response_text|default('') }}, {{ params.temperature|default(0.7) }}
- String literals: "..." or '...' (watch the closing quote)
The semantics of contains in plain language
- Strings: A contains B → string B occurs within string A (as a substring).
- Lists/sets: A contains B → element B is present in collection A.
Common patterns (copy these)
- Checking for a phrase in the provider text:
[[OUT3]] contains "Stream failed to"
- Combining conditions:
([[OUT3]] contains "Stream failed to") || ([[OUT3]] contains "gemini-2.5-pro")
- Numeric comparisons with a fallback value:
{{ params.temperature|default(0.7) }} >= 0.3 && {{ params.temperature|default(0.7) }} <= 1
- A "not empty" check:
{{ OUT.n2.response_text|default('') }} != ""
- A negative check:
!([[OUT3]] contains "error")
- A compound check with several clauses:
({{ params.max_tokens|default(256) }} >= 128) && !([[OUT1]] contains "retry")
Robustness tips
- Use |default(...) inside {{ ... }} so you do not fail on None/empty values.
- For long/unpredictable strings, test for a key fragment via contains.
- Wrap groups of conditions in (...): it is easier to read and maintain.
- Mind the quotes: "..." and '...' must be closed; this is the most common cause of syntax errors.
Anti-patterns (what to avoid)
- Unclosed quotes in string literals.
- Confusion with not: use "!" (not the word not).
- Trying to call arbitrary functions: only contains(a, b) is allowed in If (under the hood); everything else is forbidden.
Checking it in practice
- An example from a working pipeline (the idea):
If.expr: (([[OUT3]] contains "Stream failed to") || ([[OUT3]] contains "gemini-2.5-pro"))
True branch → Return "[[OUT3]]"
False branch → RawForward or another ProviderCall
Where else to look
- Expansion of [[...]] and {{ ... }} macros: [render_template_simple()](agentui/pipeline/templating.py:196)
- If logs with the expanded expression (for debugging): [IfNode.run()](agentui/pipeline/executor.py:2892)

Binary file not shown.

After

Size: 72 KiB

Binary file not shown.

After

Size: 337 KiB

Binary file not shown.

After

Size: 65 KiB

Binary file not shown.

After

Size: 973 B

Binary file not shown.

After

Size: 3.1 KiB

BIN
favicon_io_saya/favicon.ico Normal file

Binary file not shown.

After

Size: 15 KiB

BIN
favicon_io_saya/saya1.png Normal file

Binary file not shown.

After

Size: 7.6 KiB

View File

@@ -0,0 +1 @@
{"name":"","short_name":"","icons":[{"src":"/android-chrome-192x192.png","sizes":"192x192","type":"image/png"},{"src":"/android-chrome-512x512.png","sizes":"512x512","type":"image/png"}],"theme_color":"#ffffff","background_color":"#ffffff","display":"standalone"}

View File

@@ -6,147 +6,32 @@
"loop_max_iters": 1000,
"loop_time_budget_ms": 999999999999,
"clear_var_store": true,
"http_timeout_sec": 999,
"http_timeout_sec": 999.0,
"text_extract_strategy": "auto",
"text_extract_json_path": "",
"text_join_sep": "\n",
"text_extract_presets": [
{
"id": "pmfipb98aywtx6jepd5",
"name": "ввв",
"id": "pmfqonx6fvcubc09k4ep",
"name": "candidates.0.content.parts.1.inlineData.data",
"strategy": "jsonpath",
"json_path": "ввв",
"json_path": "candidates.0.content.parts.1.inlineData.data",
"join_sep": "\n"
},
{
"id": "pmfqrelw6wu9rutnzk1",
"name": "candidates.0.content.parts.1.inlineData",
"strategy": "jsonpath",
"json_path": "candidates.0.content.parts.1.inlineData",
"join_sep": "\n"
}
],
"nodes": [
{
"id": "n1",
"type": "RawForward",
"pos_x": 441,
"pos_y": 354,
"config": {
"passthrough_headers": true,
"extra_headers": "{}",
"_origId": "n1"
},
"in": {
"depends": "n5.done"
}
},
{
"id": "n2",
"type": "ProviderCall",
"pos_x": 652,
"pos_y": 46,
"config": {
"provider": "gemini",
"provider_configs": {
"openai": {
"base_url": "https://api.openai.com",
"endpoint": "/v1/chat/completions",
"headers": "{\"Authorization\":\"Bearer [[MyOpenAiKey]]\"}",
"template": "{\n \"model\": \"gpt-5-chat-latest\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": 500,\n \"presence_penalty\": {{ incoming.json.presence_penalty|default(0) }},\n \"frequency_penalty\": {{ incoming.json.frequency_penalty|default(0) }},\n \"stop\": {{ incoming.json.stop|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }}\n}"
},
"gemini": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent?key=[[VAR:incoming.api_keys.key]]",
"headers": "{}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"safetySettings\": {{ incoming.json.safetySettings|default([]) }},\n \"generationConfig\": {\n \"temperature\": {{ incoming.json.generationConfig.temperature|default(params.temperature|default(0.7)) }},\n \"topP\": {{ incoming.json.generationConfig.topP|default(params.top_p|default(1)) }},\n \"maxOutputTokens\": {{ incoming.json.generationConfig.maxOutputTokens|default(params.max_tokens|default(256)) }},\n \"stopSequences\": {{ incoming.json.generationConfig.stopSequences|default(params.stop|default([])) }},\n \"candidateCount\": {{ incoming.json.generationConfig.candidateCount|default(1) }},\n \"thinkingConfig\": {\n \"includeThoughts\": {{ incoming.json.generationConfig.thinkingConfig.includeThoughts|default(false) }},\n \"thinkingBudget\": {{ incoming.json.generationConfig.thinkingConfig.thinkingBudget|default(0) }}\n }\n }\n}"
},
"claude": {
"base_url": "https://proxy.malepreg.lol/proxy/aws/claude",
"endpoint": "/v1/messages",
"headers": "{\"x-api-key\":\"[[Clod]]\",\"anthropic-version\":\"2023-06-01\",\"anthropic-beta\":\"output-128k-2025-02-19\"}",
"template": "{\n \"model\": \"claude-opus-4-20250514\",\n [[PROMPT]],\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"stop_sequences\": {{ incoming.json.stop_sequences|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }},\n \"thinking\": {\n \"type\": \"{{ incoming.json.thinking.type|default('enabled') }}\",\n \"budget_tokens\": {{ incoming.json.thinking.budget_tokens|default(3000) }}\n }\n}"
}
},
"blocks": [
{
"id": "bmfmstojw",
"name": "Great assustant",
"role": "system",
"prompt": "You are Great assustant",
"enabled": true,
"order": 0
},
{
"id": "bmfchnynm",
"name": "Сделай [[OUT1]] красивее",
"role": "user",
"prompt": "Сделай [[OUT1]] красивее",
"enabled": true,
"order": 1
}
],
"_origId": "n2"
},
"in": {
"depends": "n1.done"
}
},
{
"id": "n3",
"type": "ProviderCall",
"pos_x": 654,
"pos_y": 566,
"config": {
"provider": "openai",
"provider_configs": {
"openai": {
"base_url": "https://api.openai.com",
"endpoint": "/v1/chat/completions",
"headers": "{\"Authorization\":\"Bearer [[MyOpenAiKey]]\"}",
"template": "{\n \"model\": \"gpt-5-chat-latest\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": 500,\n \"presence_penalty\": {{ incoming.json.presence_penalty|default(0) }},\n \"frequency_penalty\": {{ incoming.json.frequency_penalty|default(0) }},\n \"stop\": {{ incoming.json.stop|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }}\n}"
},
"gemini": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent?key=[[VAR:incoming.api_keys.key]]",
"headers": "{}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"safetySettings\": {{ incoming.json.safetySettings|default([]) }},\n \"generationConfig\": {\n \"temperature\": {{ incoming.json.generationConfig.temperature|default(params.temperature|default(0.7)) }},\n \"topP\": {{ incoming.json.generationConfig.topP|default(params.top_p|default(1)) }},\n \"maxOutputTokens\": {{ incoming.json.generationConfig.maxOutputTokens|default(params.max_tokens|default(256)) }},\n \"stopSequences\": {{ incoming.json.generationConfig.stopSequences|default(params.stop|default([])) }},\n \"candidateCount\": {{ incoming.json.generationConfig.candidateCount|default(1) }},\n \"thinkingConfig\": {\n \"includeThoughts\": {{ incoming.json.generationConfig.thinkingConfig.includeThoughts|default(false) }},\n \"thinkingBudget\": {{ incoming.json.generationConfig.thinkingConfig.thinkingBudget|default(0) }}\n }\n }\n}"
},
"claude": {
"base_url": "https://api.anthropic.com",
"endpoint": "/v1/messages",
"headers": "{\"x-api-key\":\"[[VAR:incoming.headers.x-api-key]]\",\"anthropic-version\":\"2023-06-01\",\"anthropic-beta\":\"[[VAR:incoming.headers.anthropic-beta]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"stop_sequences\": {{ incoming.json.stop_sequences|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }},\n \"thinking\": {\n \"type\": \"{{ incoming.json.thinking.type|default('disabled') }}\",\n \"budget_tokens\": {{ incoming.json.thinking.budget_tokens|default(0) }}\n },\n \"anthropic_version\": \"{{ anthropic_version|default('2023-06-01') }}\"\n}"
}
},
"blocks": [
{
"id": "bmfchn1hq",
"name": "Сделай [[OUT1]] красивее",
"role": "user",
"prompt": "Сделай [[OUT1]] красивее",
"enabled": true,
"order": 0
}
],
"_origId": "n3"
},
"in": {
"depends": "n1.done"
}
},
{
"id": "n4",
"type": "Return",
"pos_x": 1193,
"pos_y": 314,
"config": {
"target_format": "auto",
"text_template": "[[OUT6]] [[Test]]",
"_origId": "n4"
},
"in": {
"depends": "n7.true"
}
},
{
"id": "n5",
"type": "SetVars",
"pos_x": 171,
"pos_y": 487,
"pos_x": 12,
"pos_y": 780,
"config": {
"variables": [
{
@@ -173,18 +58,49 @@
"in": {}
},
{
"id": "n6",
"type": "ProviderCall",
"pos_x": 923,
"pos_y": 345,
"id": "n2",
"type": "Return",
"pos_x": 1344,
"pos_y": 756,
"config": {
"provider": "openai",
"target_format": "auto",
"text_template": "[[OUT7]]",
"_origId": "n2"
},
"in": {
"depends": "n7.done"
}
},
{
"id": "n3",
"type": "RawForward",
"pos_x": 552,
"pos_y": 696,
"config": {
"passthrough_headers": true,
"extra_headers": "{\"connection\": \"close\"}",
"_origId": "n3",
"while_expr": "([[OUT3]] contains \"Stream failed to\") || ([[OUT3]] contains \"gemini-2.5-pro\")",
"ignore_errors": false,
"while_max_iters": 50
},
"in": {
"depends": "n5.done"
}
},
{
"id": "n4",
"type": "ProviderCall",
"pos_x": 792,
"pos_y": 624,
"config": {
"provider": "gemini",
"provider_configs": {
"openai": {
"base_url": "https://api.openai.com",
"endpoint": "/v1/chat/completions",
"headers": "{\"Authorization\":\"Bearer [[MyOpenAiKey]]\"}",
"template": "{\n \"model\": \"gpt-5-chat-latest\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": 500,\n \"presence_penalty\": {{ incoming.json.presence_penalty|default(0) }},\n \"frequency_penalty\": {{ incoming.json.frequency_penalty|default(0) }},\n \"stop\": {{ incoming.json.stop|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }}\n}"
"headers": "{\"Authorization\":\"[[VAR:incoming.headers.authorization]]\"}",
"template": "{\n \"model\": \"gpt-5-chat-latest\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_completion_tokens\": {{ incoming.json.max_completion_tokens|default(params.max_tokens|default(256)) }},\n \"presence_penalty\": {{ incoming.json.presence_penalty|default(0) }},\n \"frequency_penalty\": {{ incoming.json.frequency_penalty|default(0) }},\n \"stop\": {{ incoming.json.stop|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }}\n}"
},
"gemini": {
"base_url": "https://generativelanguage.googleapis.com",
@@ -192,6 +108,12 @@
"headers": "{}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"safetySettings\": {{ incoming.json.safetySettings|default([]) }},\n \"generationConfig\": {\n \"temperature\": {{ incoming.json.generationConfig.temperature|default(params.temperature|default(0.7)) }},\n \"topP\": {{ incoming.json.generationConfig.topP|default(params.top_p|default(1)) }},\n \"maxOutputTokens\": {{ incoming.json.generationConfig.maxOutputTokens|default(params.max_tokens|default(256)) }},\n \"stopSequences\": {{ incoming.json.generationConfig.stopSequences|default(params.stop|default([])) }},\n \"candidateCount\": {{ incoming.json.generationConfig.candidateCount|default(1) }},\n \"thinkingConfig\": {\n \"includeThoughts\": {{ incoming.json.generationConfig.thinkingConfig.includeThoughts|default(false) }},\n \"thinkingBudget\": {{ incoming.json.generationConfig.thinkingConfig.thinkingBudget|default(0) }}\n }\n }\n}"
},
"gemini_image": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent",
"headers": "{\"x-goog-api-key\":\"[[VAR:incoming.api_keys.key]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]]\n}"
},
"claude": {
"base_url": "https://api.anthropic.com",
"endpoint": "/v1/messages",
@@ -201,59 +123,76 @@
},
"blocks": [
{
"id": "bmfmk7g4a",
"name": "New Block",
"role": "system",
"prompt": "",
"id": "bmfwy94ev",
"name": "Твой ответ недостаточно хорош",
"role": "user",
"prompt": "Твой ответ:\n```\n[[OUT3]]\n```\nнедостаточно хорош, при его написании ты не следовал инструкциям. переделай исходя из инструкций, найди недостатки разобрав каждое действие оценив его логичность и следование истории от 0до10, перепиши эти моменты на нормальные.",
"enabled": true,
"order": 0
},
{
"id": "bmfdyczbd",
"name": "Объедени [[OUT3]], [[OUT4]] сделай более красиво.",
"role": "user",
"prompt": "Объедени [ [[OUT3]], [[OUT2]] ] сделай более красиво. напиши слово \"Красиво\" в конце.",
"enabled": true,
"order": 1
},
{
"id": "bmfh98jkh",
"name": "New Block1",
"role": "system",
"prompt": "1",
"enabled": true,
"order": 2
},
{
"id": "bmfmk74yz",
"name": "New Block",
"role": "assistant",
"prompt": "fuf",
"enabled": true,
"order": 3
}
],
"_origId": "n6"
"_origId": "n4",
"prompt_combine": "[[VAR:incoming.json.contents]] & [[PROMPT]]@pos=-1",
"while_expr": "([[OUT3]] contains \"Stream failed to\") || ([[OUT3]] contains \"gemini-2.5-pro\")",
"ignore_errors": false,
"while_max_iters": 50
},
"in": {
"depends": [
"n2.done",
"n3.done",
"n7.false"
]
"depends": "n3.done"
}
},
{
"id": "n7",
"type": "If",
"pos_x": 1311,
"pos_y": 566,
"type": "ProviderCall",
"pos_x": 1080,
"pos_y": 624,
"config": {
"expr": "[[OUT6]] contains \"Красиво\"",
"_origId": "n7"
"provider": "gemini",
"provider_configs": {
"openai": {
"base_url": "https://api.openai.com",
"endpoint": "/v1/chat/completions",
"headers": "{\"Authorization\":\"Bearer [[MyOpenAiKey]]\"}",
"template": "{\n \"model\": \"gpt-5-chat-latest\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_completion_tokens\": {{ incoming.json.max_completion_tokens|default(params.max_tokens|default(256)) }},\n \"presence_penalty\": {{ incoming.json.presence_penalty|default(0) }},\n \"frequency_penalty\": {{ incoming.json.frequency_penalty|default(0) }},\n \"stop\": {{ incoming.json.stop|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }}\n}"
},
"gemini": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent?key=[[VAR:incoming.api_keys.key]]",
"headers": "{}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"safetySettings\": {{ incoming.json.safetySettings|default([]) }},\n \"generationConfig\": {\n \"temperature\": {{ incoming.json.generationConfig.temperature|default(params.temperature|default(0.7)) }},\n \"topP\": {{ incoming.json.generationConfig.topP|default(params.top_p|default(1)) }},\n \"maxOutputTokens\": {{ incoming.json.generationConfig.maxOutputTokens|default(params.max_tokens|default(256)) }},\n \"stopSequences\": {{ incoming.json.generationConfig.stopSequences|default(params.stop|default([])) }},\n \"candidateCount\": {{ incoming.json.generationConfig.candidateCount|default(1) }},\n \"thinkingConfig\": {\n \"includeThoughts\": {{ incoming.json.generationConfig.thinkingConfig.includeThoughts|default(false) }},\n \"thinkingBudget\": {{ incoming.json.generationConfig.thinkingConfig.thinkingBudget|default(0) }}\n }\n }\n}"
},
"gemini_image": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent",
"headers": "{\"x-goog-api-key\":\"[[VAR:incoming.api_keys.key]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]]\n}"
},
"claude": {
"base_url": "https://proxy.malepreg.lol/proxy/aws/claude",
"endpoint": "/v1/messages",
"headers": "{\"x-api-key\":\"igrovik\",\"anthropic-version\":\"2023-06-01\",\"anthropic-beta\":\"[[VAR:incoming.headers.anthropic-beta]]\"}",
"template": "{\n \"model\": \"claude-opus-4-20250514\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"stop_sequences\": {{ incoming.json.stop_sequences|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }},\n \"thinking\": {\n \"type\": \"{{ incoming.json.thinking.type|default('disabled') }}\",\n \"budget_tokens\": {{ incoming.json.thinking.budget_tokens|default(0) }}\n },\n \"anthropic_version\": \"{{ anthropic_version|default('2023-06-01') }}\"\n}"
}
},
"blocks": [
{
"id": "bmfzvzpl7",
"name": "Может содержать такие конструкции",
"role": "user",
"prompt": "Твой ответ:\n```\n[[OUT4]]\n```\nМожет содержать такие конструкции:\n**'Not X, but Y'** narrative structure. This includes any and all variations of stating what something *is not* in order to emphasize what it *is*. Нужно заменить места на нормальный нарратив.",
"enabled": true,
"order": 0
}
],
"_origId": "n7",
"prompt_combine": "[[VAR:incoming.json.contents]] & [[PROMPT]]@pos=-1",
"claude_no_system": true,
"while_expr": "([[OUT7]] contains \"Stream failed to\") || ([[OUT7]] contains \"gemini-2.5-pro\")",
"ignore_errors": false,
"while_max_iters": 50
},
"in": {
"depends": "n6.done"
"depends": "n4.done"
}
}
]

237
presets/123123123.json Normal file
View File

@@ -0,0 +1,237 @@
{
"id": "pipeline_editor",
"name": "Edited Pipeline",
"parallel_limit": 8,
"loop_mode": "iterative",
"loop_max_iters": 1000,
"loop_time_budget_ms": 999999999999,
"clear_var_store": true,
"http_timeout_sec": 999,
"text_extract_strategy": "auto",
"text_extract_json_path": "",
"text_join_sep": "\n",
"text_extract_presets": [
{
"id": "pmfqonx6fvcubc09k4ep",
"name": "candidates.0.content.parts.1.inlineData.data",
"strategy": "jsonpath",
"json_path": "candidates.0.content.parts.1.inlineData.data",
"join_sep": "\n"
},
{
"id": "pmfqrelw6wu9rutnzk1",
"name": "candidates.0.content.parts.1.inlineData",
"strategy": "jsonpath",
"json_path": "candidates.0.content.parts.1.inlineData",
"join_sep": "\n"
}
],
"nodes": [
{
"id": "n5",
"type": "SetVars",
"pos_x": 12,
"pos_y": 780,
"config": {
"variables": [
{
"id": "vmfi99ftc",
"name": "Clod",
"mode": "string",
"value": "igrovik"
},
{
"id": "vmfi99gjw",
"name": "MyOpenAiKey",
"mode": "string",
"value": "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"
},
{
"id": "vmfjkn09i",
"name": "NAMETest",
"mode": "expr",
"value": "128 + 64"
}
],
"_origId": "n5"
},
"in": {}
},
{
"id": "n2",
"type": "Return",
"pos_x": 1344,
"pos_y": 756,
"config": {
"target_format": "auto",
"text_template": "[[OUT7]]",
"_origId": "n2"
},
"in": {
"depends": "n8.false"
}
},
{
"id": "n3",
"type": "RawForward",
"pos_x": 564,
"pos_y": 660,
"config": {
"passthrough_headers": true,
"extra_headers": "{\"connection\": \"close\"}",
"_origId": "n3"
},
"in": {
"depends": [
"n5.done",
"n1.true"
]
}
},
{
"id": "n1",
"type": "If",
"pos_x": 564,
"pos_y": 888,
"config": {
"expr": "([[OUT3]] contains \"Stream&#32;failed&#32;to\") || ([[OUT3]] contains \"gemini-2.5-pro\")",
"_origId": "n1"
},
"in": {
"depends": "n3.done"
}
},
{
"id": "n4",
"type": "ProviderCall",
"pos_x": 792,
"pos_y": 624,
"config": {
"provider": "gemini",
"provider_configs": {
"openai": {
"base_url": "https://api.openai.com",
"endpoint": "/v1/chat/completions",
"headers": "{\"Authorization\":\"Bearer [[VAR:incoming.headers.authorization]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"max_completion_tokens\": {{ incoming.json.max_completion_tokens|default(params.max_tokens|default(256)) }},\n \"presence_penalty\": {{ incoming.json.presence_penalty|default(0) }},\n \"frequency_penalty\": {{ incoming.json.frequency_penalty|default(0) }},\n \"stop\": {{ incoming.json.stop|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }}\n}"
},
"gemini": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent?key=[[VAR:incoming.api_keys.key]]",
"headers": "{}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"safetySettings\": {{ incoming.json.safetySettings|default([]) }},\n \"generationConfig\": {\n \"temperature\": {{ incoming.json.generationConfig.temperature|default(params.temperature|default(0.7)) }},\n \"topP\": {{ incoming.json.generationConfig.topP|default(params.top_p|default(1)) }},\n \"maxOutputTokens\": {{ incoming.json.generationConfig.maxOutputTokens|default(params.max_tokens|default(256)) }},\n \"stopSequences\": {{ incoming.json.generationConfig.stopSequences|default(params.stop|default([])) }},\n \"candidateCount\": {{ incoming.json.generationConfig.candidateCount|default(1) }},\n \"thinkingConfig\": {\n \"includeThoughts\": {{ incoming.json.generationConfig.thinkingConfig.includeThoughts|default(false) }},\n \"thinkingBudget\": {{ incoming.json.generationConfig.thinkingConfig.thinkingBudget|default(0) }}\n }\n }\n}"
},
"gemini_image": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent",
"headers": "{\"x-goog-api-key\":\"[[VAR:incoming.api_keys.key]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]]\n}"
},
"claude": {
"base_url": "https://api.anthropic.com",
"endpoint": "/v1/messages",
"headers": "{\"x-api-key\":\"[[VAR:incoming.headers.x-api-key]]\",\"anthropic-version\":\"2023-06-01\",\"anthropic-beta\":\"[[VAR:incoming.headers.anthropic-beta]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"stop_sequences\": {{ incoming.json.stop_sequences|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }},\n \"thinking\": {\n \"type\": \"{{ incoming.json.thinking.type|default('disabled') }}\",\n \"budget_tokens\": {{ incoming.json.thinking.budget_tokens|default(0) }}\n },\n \"anthropic_version\": \"{{ anthropic_version|default('2023-06-01') }}\"\n}"
}
},
"blocks": [
{
"id": "bmfwy94ev",
"name": "Твой ответ недостаточно хорош",
"role": "user",
"prompt": "Твой ответ:\n```\n[[OUT3]]\n```\nнедостаточно хорош, при его написании ты не следовал инструкциям. переделай исходя из инструкций, найди недостатк1.",
"enabled": true,
"order": 0
}
],
"_origId": "n4",
"prompt_combine": "[[VAR:incoming.json.contents]] & [[PROMPT]]@pos=-1"
},
"in": {
"depends": [
"n6.true",
"n1.false"
]
}
},
{
"id": "n6",
"type": "If",
"pos_x": 792,
"pos_y": 876,
"config": {
"expr": "([[OUT4]] contains \"Stream&#32;failed&#32;to\") || ([[OUT4]] contains \"gemini-2.5-pro\")",
"_origId": "n6"
},
"in": {
"depends": "n4.done"
}
},
{
"id": "n7",
"type": "ProviderCall",
"pos_x": 1068,
"pos_y": 540,
"config": {
"provider": "gemini",
"provider_configs": {
"openai": {
"base_url": "https://api.openai.com",
"endpoint": "/v1/chat/completions",
"headers": "{\"Authorization\":\"Bearer [[VAR:incoming.headers.authorization]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"max_completion_tokens\": {{ incoming.json.max_completion_tokens|default(params.max_tokens|default(256)) }},\n \"presence_penalty\": {{ incoming.json.presence_penalty|default(0) }},\n \"frequency_penalty\": {{ incoming.json.frequency_penalty|default(0) }},\n \"stop\": {{ incoming.json.stop|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }}\n}"
},
"gemini": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent?key=[[VAR:incoming.api_keys.key]]",
"headers": "{}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"safetySettings\": {{ incoming.json.safetySettings|default([]) }},\n \"generationConfig\": {\n \"temperature\": {{ incoming.json.generationConfig.temperature|default(params.temperature|default(0.7)) }},\n \"topP\": {{ incoming.json.generationConfig.topP|default(params.top_p|default(1)) }},\n \"maxOutputTokens\": {{ incoming.json.generationConfig.maxOutputTokens|default(params.max_tokens|default(256)) }},\n \"stopSequences\": {{ incoming.json.generationConfig.stopSequences|default(params.stop|default([])) }},\n \"candidateCount\": {{ incoming.json.generationConfig.candidateCount|default(1) }},\n \"thinkingConfig\": {\n \"includeThoughts\": {{ incoming.json.generationConfig.thinkingConfig.includeThoughts|default(false) }},\n \"thinkingBudget\": {{ incoming.json.generationConfig.thinkingConfig.thinkingBudget|default(0) }}\n }\n }\n}"
},
"gemini_image": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent",
"headers": "{\"x-goog-api-key\":\"[[VAR:incoming.api_keys.key]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]]\n}"
},
"claude": {
"base_url": "https://api.anthropic.com",
"endpoint": "/v1/messages",
"headers": "{\"x-api-key\":\"[[VAR:incoming.headers.x-api-key]]\",\"anthropic-version\":\"2023-06-01\",\"anthropic-beta\":\"[[VAR:incoming.headers.anthropic-beta]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"stop_sequences\": {{ incoming.json.stop_sequences|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }},\n \"thinking\": {\n \"type\": \"{{ incoming.json.thinking.type|default('disabled') }}\",\n \"budget_tokens\": {{ incoming.json.thinking.budget_tokens|default(0) }}\n },\n \"anthropic_version\": \"{{ anthropic_version|default('2023-06-01') }}\"\n}"
}
},
"blocks": [
{
"id": "bmfzvzpl7",
"name": "Может содержать такие конструкции",
"role": "user",
"prompt": "Твой ответ:\n```\n[[OUT4]]\n```\nМожет содержать такие конструкции:\n**'Not X, but Y'** narrative structure. This includes any and all variations of stating what something *is not* in order to emphasize what it *is*. Нужно заменить места на нормальный нарратив.",
"enabled": true,
"order": 0
}
],
"_origId": "n7",
"prompt_combine": "[[VAR:incoming.json.contents]] & [[PROMPT]]@pos=-1"
},
"in": {
"depends": [
"n6.false",
"n8.true"
]
}
},
{
"id": "n8",
"type": "If",
"pos_x": 1068,
"pos_y": 876,
"config": {
"expr": "([[OUT7]] contains \"Stream failed to\") || ([[OUT7]] contains \"gemini-2.5-pro\")",
"_origId": "n8"
},
"in": {
"depends": "n7.done"
}
}
]
}

191
presets/imgtests.json Normal file
View File

@@ -0,0 +1,191 @@
{
"id": "pipeline_editor",
"name": "Edited Pipeline",
"parallel_limit": 8,
"loop_mode": "iterative",
"loop_max_iters": 1000,
"loop_time_budget_ms": 999999999999,
"clear_var_store": true,
"http_timeout_sec": 999,
"text_extract_strategy": "auto",
"text_extract_json_path": "",
"text_join_sep": "\n",
"text_extract_presets": [
{
"id": "pmfqonx6fvcubc09k4ep",
"name": "candidates.0.content.parts.1.inlineData.data",
"strategy": "jsonpath",
"json_path": "candidates.0.content.parts.1.inlineData.data",
"join_sep": "\n"
},
{
"id": "pmfqrelw6wu9rutnzk1",
"name": "candidates.0.content.parts.1.inlineData",
"strategy": "jsonpath",
"json_path": "candidates.0.content.parts.1.inlineData",
"join_sep": "\n"
}
],
"nodes": [
{
"id": "n5",
"type": "SetVars",
"pos_x": -603,
"pos_y": 637,
"config": {
"variables": [
{
"id": "vmfi99ftc",
"name": "Clod",
"mode": "string",
"value": "igrovik"
},
{
"id": "vmfi99gjw",
"name": "MyOpenAiKey",
"mode": "string",
"value": "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"
},
{
"id": "vmfjkn09i",
"name": "NAMETest",
"mode": "expr",
"value": "128 + 64"
}
],
"_origId": "n5"
},
"in": {}
},
{
"id": "n2",
"type": "Return",
"pos_x": 509,
"pos_y": 459,
"config": {
"target_format": "auto",
"text_template": "[[OUT3]]",
"_origId": "n2"
},
"in": {
"depends": "n1.false"
}
},
{
"id": "n3",
"type": "RawForward",
"pos_x": 45,
"pos_y": 750,
"config": {
"passthrough_headers": true,
"extra_headers": "{\"connection\": \"close\"}",
"_origId": "n3"
},
"in": {
"depends": "n1.true"
}
},
{
"id": "n1",
"type": "If",
"pos_x": 344,
"pos_y": 730,
"config": {
"expr": "([[OUT3]] contains \"Stream&#32;failed&#32;to\") || ([[OUT3]] contains \"gemini-2.5-pro\")",
"_origId": "n1"
},
"in": {
"depends": "n3.done"
}
},
{
"id": "n4",
"type": "ProviderCall",
"pos_x": -185.88888888888889,
"pos_y": 523,
"config": {
"provider": "gemini_image",
"provider_configs": {
"openai": {
"base_url": "https://api.openai.com",
"endpoint": "/v1/chat/completions",
"headers": "{\"Authorization\":\"Bearer [[VAR:incoming.headers.authorization]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"max_completion_tokens\": {{ incoming.json.max_completion_tokens|default(params.max_tokens|default(256)) }},\n \"presence_penalty\": {{ incoming.json.presence_penalty|default(0) }},\n \"frequency_penalty\": {{ incoming.json.frequency_penalty|default(0) }},\n \"stop\": {{ incoming.json.stop|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }}\n}"
},
"gemini": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent?key=[[VAR:incoming.api_keys.key]]",
"headers": "{}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"safetySettings\": {{ incoming.json.safetySettings|default([]) }},\n \"generationConfig\": {\n \"temperature\": {{ incoming.json.generationConfig.temperature|default(params.temperature|default(0.7)) }},\n \"topP\": {{ incoming.json.generationConfig.topP|default(params.top_p|default(1)) }},\n \"maxOutputTokens\": {{ incoming.json.generationConfig.maxOutputTokens|default(params.max_tokens|default(256)) }},\n \"stopSequences\": {{ incoming.json.generationConfig.stopSequences|default(params.stop|default([])) }},\n \"candidateCount\": {{ incoming.json.generationConfig.candidateCount|default(1) }},\n \"thinkingConfig\": {\n \"includeThoughts\": {{ incoming.json.generationConfig.thinkingConfig.includeThoughts|default(false) }},\n \"thinkingBudget\": {{ incoming.json.generationConfig.thinkingConfig.thinkingBudget|default(0) }}\n }\n }\n}"
},
"gemini_image": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/gemini-2.5-flash-image-preview:generateContent",
"headers": "{\"x-goog-api-key\":\"[[VAR:incoming.api_keys.key]]\"}",
"template": "{\n \"model\": \"gemini-2.5-flash-image-preview\",\n [[OUT3]]\n}"
},
"claude": {
"base_url": "https://api.anthropic.com",
"endpoint": "/v1/messages",
"headers": "{\"x-api-key\":\"[[VAR:incoming.headers.x-api-key]]\",\"anthropic-version\":\"2023-06-01\",\"anthropic-beta\":\"[[VAR:incoming.headers.anthropic-beta]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"stop_sequences\": {{ incoming.json.stop_sequences|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }},\n \"thinking\": {\n \"type\": \"{{ incoming.json.thinking.type|default('disabled') }}\",\n \"budget_tokens\": {{ incoming.json.thinking.budget_tokens|default(0) }}\n },\n \"anthropic_version\": \"{{ anthropic_version|default('2023-06-01') }}\"\n}"
}
},
"blocks": [],
"_origId": "n4"
},
"in": {
"depends": "n6.done"
}
},
{
"id": "n6",
"type": "ProviderCall",
"pos_x": -391,
"pos_y": 648,
"config": {
"provider": "gemini",
"provider_configs": {
"openai": {
"base_url": "https://api.openai.com",
"endpoint": "/v1/chat/completions",
"headers": "{\"Authorization\":\"Bearer [[VAR:incoming.headers.authorization]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"max_completion_tokens\": {{ incoming.json.max_completion_tokens|default(params.max_tokens|default(256)) }},\n \"presence_penalty\": {{ incoming.json.presence_penalty|default(0) }},\n \"frequency_penalty\": {{ incoming.json.frequency_penalty|default(0) }},\n \"stop\": {{ incoming.json.stop|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }}\n}"
},
"gemini": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent?key=[[VAR:incoming.api_keys.key]]",
"headers": "{}",
"template": "{\n \"model\": \"{{ model }}\",\n [[VAR:incoming.json.contents]],\n [[PROMPT]],\n \"safetySettings\": {{ incoming.json.safetySettings|default([]) }},\n \"generationConfig\": {\n \"temperature\": {{ incoming.json.generationConfig.temperature|default(params.temperature|default(0.7)) }},\n \"topP\": {{ incoming.json.generationConfig.topP|default(params.top_p|default(1)) }},\n \"maxOutputTokens\": {{ incoming.json.generationConfig.maxOutputTokens|default(params.max_tokens|default(256)) }},\n \"stopSequences\": {{ incoming.json.generationConfig.stopSequences|default(params.stop|default([])) }},\n \"candidateCount\": {{ incoming.json.generationConfig.candidateCount|default(1) }},\n \"thinkingConfig\": {\n \"includeThoughts\": {{ incoming.json.generationConfig.thinkingConfig.includeThoughts|default(false) }},\n \"thinkingBudget\": {{ incoming.json.generationConfig.thinkingConfig.thinkingBudget|default(0) }}\n }\n }\n}"
},
"gemini_image": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent",
"headers": "{\"x-goog-api-key\":\"[[VAR:incoming.api_keys.key]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]]\n}"
},
"claude": {
"base_url": "https://api.anthropic.com",
"endpoint": "/v1/messages",
"headers": "{\"x-api-key\":\"[[VAR:incoming.headers.x-api-key]]\",\"anthropic-version\":\"2023-06-01\",\"anthropic-beta\":\"[[VAR:incoming.headers.anthropic-beta]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"stop_sequences\": {{ incoming.json.stop_sequences|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }},\n \"thinking\": {\n \"type\": \"{{ incoming.json.thinking.type|default('disabled') }}\",\n \"budget_tokens\": {{ incoming.json.thinking.budget_tokens|default(0) }}\n },\n \"anthropic_version\": \"{{ anthropic_version|default('2023-06-01') }}\"\n}"
}
},
"blocks": [
{
"id": "bmfuw6ayo",
"name": "Создание промпта",
"role": "user",
"prompt": "Создай промпт для генерации изображения исходя из последнего действие {{user}}. Промпт должен быть лаконичный, простенький, без сложных формулировок. В ответе не пиши ничего кроме промпта.",
"enabled": true,
"order": 0
}
],
"_origId": "n6"
},
"in": {
"depends": "n5.done"
}
}
]
}

171
presets/prepprst.json Normal file
View File

@@ -0,0 +1,171 @@
{
"id": "pipeline_editor",
"name": "Edited Pipeline",
"parallel_limit": 8,
"loop_mode": "iterative",
"loop_max_iters": 1000,
"loop_time_budget_ms": 999999999999,
"clear_var_store": true,
"http_timeout_sec": 999,
"text_extract_strategy": "auto",
"text_extract_json_path": "",
"text_join_sep": "\n",
"text_extract_presets": [
{
"id": "pmfqonx6fvcubc09k4ep",
"name": "candidates.0.content.parts.1.inlineData.data",
"strategy": "jsonpath",
"json_path": "candidates.0.content.parts.1.inlineData.data",
"join_sep": "\n"
},
{
"id": "pmfqrelw6wu9rutnzk1",
"name": "candidates.0.content.parts.1.inlineData",
"strategy": "jsonpath",
"json_path": "candidates.0.content.parts.1.inlineData",
"join_sep": "\n"
}
],
"nodes": [
{
"id": "n5",
"type": "SetVars",
"pos_x": -125,
"pos_y": 561,
"config": {
"variables": [
{
"id": "vmfi99ftc",
"name": "Clod",
"mode": "string",
"value": "igrovik"
},
{
"id": "vmfi99gjw",
"name": "MyOpenAiKey",
"mode": "string",
"value": "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"
},
{
"id": "vmfjkn09i",
"name": "NAMETest",
"mode": "expr",
"value": "128 + 64"
}
],
"_origId": "n5"
},
"in": {}
},
{
"id": "n2",
"type": "Return",
"pos_x": 954,
"pos_y": 564,
"config": {
"target_format": "auto",
"text_template": "[[OUT4]]",
"_origId": "n2"
},
"in": {
"depends": "n6.false"
}
},
{
"id": "n3",
"type": "RawForward",
"pos_x": 74,
"pos_y": 450.5,
"config": {
"passthrough_headers": true,
"extra_headers": "{\"connection\": \"close\"}",
"_origId": "n3"
},
"in": {
"depends": [
"n5.done",
"n1.true"
]
}
},
{
"id": "n1",
"type": "If",
"pos_x": 75,
"pos_y": 909,
"config": {
"expr": "([[OUT3]] contains \"Stream&#32;failed&#32;to\") || ([[OUT3]] contains \"gemini-2.5-pro\")",
"_origId": "n1"
},
"in": {
"depends": "n3.done"
}
},
{
"id": "n4",
"type": "ProviderCall",
"pos_x": 663,
"pos_y": 335,
"config": {
"provider": "gemini",
"provider_configs": {
"openai": {
"base_url": "https://api.openai.com",
"endpoint": "/v1/chat/completions",
"headers": "{\"Authorization\":\"Bearer [[VAR:incoming.headers.authorization]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"max_completion_tokens\": {{ incoming.json.max_completion_tokens|default(params.max_tokens|default(256)) }},\n \"presence_penalty\": {{ incoming.json.presence_penalty|default(0) }},\n \"frequency_penalty\": {{ incoming.json.frequency_penalty|default(0) }},\n \"stop\": {{ incoming.json.stop|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }}\n}"
},
"gemini": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent?key=[[VAR:incoming.api_keys.key]]",
"headers": "{}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"safetySettings\": {{ incoming.json.safetySettings|default([]) }},\n \"generationConfig\": {\n \"temperature\": {{ incoming.json.generationConfig.temperature|default(params.temperature|default(0.7)) }},\n \"topP\": {{ incoming.json.generationConfig.topP|default(params.top_p|default(1)) }},\n \"maxOutputTokens\": {{ incoming.json.generationConfig.maxOutputTokens|default(params.max_tokens|default(256)) }},\n \"stopSequences\": {{ incoming.json.generationConfig.stopSequences|default(params.stop|default([])) }},\n \"candidateCount\": {{ incoming.json.generationConfig.candidateCount|default(1) }},\n \"thinkingConfig\": {\n \"includeThoughts\": {{ incoming.json.generationConfig.thinkingConfig.includeThoughts|default(false) }},\n \"thinkingBudget\": {{ incoming.json.generationConfig.thinkingConfig.thinkingBudget|default(0) }}\n }\n }\n}"
},
"gemini_image": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent",
"headers": "{\"x-goog-api-key\":\"[[VAR:incoming.api_keys.key]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]]\n}"
},
"claude": {
"base_url": "https://api.anthropic.com",
"endpoint": "/v1/messages",
"headers": "{\"x-api-key\":\"[[VAR:incoming.headers.x-api-key]]\",\"anthropic-version\":\"2023-06-01\",\"anthropic-beta\":\"[[VAR:incoming.headers.anthropic-beta]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"stop_sequences\": {{ incoming.json.stop_sequences|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }},\n \"thinking\": {\n \"type\": \"{{ incoming.json.thinking.type|default('disabled') }}\",\n \"budget_tokens\": {{ incoming.json.thinking.budget_tokens|default(0) }}\n },\n \"anthropic_version\": \"{{ anthropic_version|default('2023-06-01') }}\"\n}"
}
},
"blocks": [
{
"id": "bmfwy94ev",
"name": "Твой ответ недостаточно хорош",
"role": "user",
"prompt": "Твой ответ:\n```\n[[OUT3]]\n```\nнедостаточно хорош, при его написании ты не следовал инструкциям. переделай исходя из инструкций, найди недостатки.",
"enabled": true,
"order": 0
}
],
"_origId": "n4",
"prompt_combine": "[[VAR:incoming.json.contents]] & [[PROMPT]]@pos=-1"
},
"in": {
"depends": [
"n6.true",
"n1.false"
]
}
},
{
"id": "n6",
"type": "If",
"pos_x": 675,
"pos_y": 882.25,
"config": {
"expr": "([[OUT4]] contains \"Stream&#32;failed&#32;to\") || ([[OUT4]] contains \"gemini-2.5-pro\")",
"_origId": "n6"
},
"in": {
"depends": "n4.done"
}
}
]
}

105
presets/retry.json Normal file
View File

@@ -0,0 +1,105 @@
{
"id": "pipeline_editor",
"name": "Edited Pipeline",
"parallel_limit": 8,
"loop_mode": "iterative",
"loop_max_iters": 1000,
"loop_time_budget_ms": 999999999999,
"clear_var_store": true,
"http_timeout_sec": 999,
"text_extract_strategy": "auto",
"text_extract_json_path": "",
"text_join_sep": "\n",
"text_extract_presets": [
{
"id": "pmfqonx6fvcubc09k4ep",
"name": "candidates.0.content.parts.1.inlineData.data",
"strategy": "jsonpath",
"json_path": "candidates.0.content.parts.1.inlineData.data",
"join_sep": "\n"
},
{
"id": "pmfqrelw6wu9rutnzk1",
"name": "candidates.0.content.parts.1.inlineData",
"strategy": "jsonpath",
"json_path": "candidates.0.content.parts.1.inlineData",
"join_sep": "\n"
}
],
"nodes": [
{
"id": "n5",
"type": "SetVars",
"pos_x": -125,
"pos_y": 561,
"config": {
"variables": [
{
"id": "vmfi99ftc",
"name": "Clod",
"mode": "string",
"value": "igrovik"
},
{
"id": "vmfi99gjw",
"name": "MyOpenAiKey",
"mode": "string",
"value": "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"
},
{
"id": "vmfjkn09i",
"name": "NAMETest",
"mode": "expr",
"value": "128 + 64"
}
],
"_origId": "n5"
},
"in": {}
},
{
"id": "n2",
"type": "Return",
"pos_x": 507,
"pos_y": 459,
"config": {
"target_format": "auto",
"text_template": "[[OUT3]]",
"_origId": "n2"
},
"in": {
"depends": "n1.false"
}
},
{
"id": "n3",
"type": "RawForward",
"pos_x": 114,
"pos_y": 425,
"config": {
"passthrough_headers": true,
"extra_headers": "{\"connection\": \"close\"}",
"_origId": "n3"
},
"in": {
"depends": [
"n5.done",
"n1.true"
]
}
},
{
"id": "n1",
"type": "If",
"pos_x": 344,
"pos_y": 730,
"config": {
"expr": "([[OUT3]] contains \"Stream&#32;failed&#32;to\") || ([[OUT3]] contains \"gemini-2.5-pro\")",
"_origId": "n1"
},
"in": {
"depends": "n3.done"
}
}
]
}

View File

@@ -40,7 +40,7 @@
"pos_x": 652,
"pos_y": 46,
"config": {
"provider": "claude",
"provider": "gemini",
"provider_configs": {
"openai": {
"base_url": "https://api.openai.com",
@@ -59,6 +59,12 @@
"endpoint": "/v1/messages",
"headers": "{\"x-api-key\":\"[[Clod]]\",\"anthropic-version\":\"2023-06-01\",\"anthropic-beta\":\"output-128k-2025-02-19\"}",
"template": "{\n \"model\": \"claude-opus-4-20250514\",\n [[PROMPT]],\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"stop_sequences\": {{ incoming.json.stop_sequences|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }},\n \"thinking\": {\n \"type\": \"{{ incoming.json.thinking.type|default('enabled') }}\",\n \"budget_tokens\": {{ incoming.json.thinking.budget_tokens|default(3000) }}\n }\n}"
},
"gemini_image": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent",
"headers": "{\"x-goog-api-key\":\"[[VAR:incoming.api_keys.key]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]]\n}"
}
},
"blocks": [
@@ -77,14 +83,6 @@
"prompt": "Сделай [[OUT1]] красивее",
"enabled": true,
"order": 1
},
{
"id": "bmfmssvy8",
"name": "New Block",
"role": "assistant",
"prompt": "Sure",
"enabled": true,
"order": 2
}
],
"_origId": "n2"
@@ -118,6 +116,12 @@
"endpoint": "/v1/messages",
"headers": "{\"x-api-key\":\"[[VAR:incoming.headers.x-api-key]]\",\"anthropic-version\":\"2023-06-01\",\"anthropic-beta\":\"[[VAR:incoming.headers.anthropic-beta]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"stop_sequences\": {{ incoming.json.stop_sequences|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }},\n \"thinking\": {\n \"type\": \"{{ incoming.json.thinking.type|default('disabled') }}\",\n \"budget_tokens\": {{ incoming.json.thinking.budget_tokens|default(0) }}\n },\n \"anthropic_version\": \"{{ anthropic_version|default('2023-06-01') }}\"\n}"
},
"gemini_image": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent",
"headers": "{\"x-goog-api-key\":\"[[VAR:incoming.api_keys.key]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]]\n}"
}
},
"blocks": [
@@ -205,6 +209,12 @@
"endpoint": "/v1/messages",
"headers": "{\"x-api-key\":\"[[VAR:incoming.headers.x-api-key]]\",\"anthropic-version\":\"2023-06-01\",\"anthropic-beta\":\"[[VAR:incoming.headers.anthropic-beta]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"stop_sequences\": {{ incoming.json.stop_sequences|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }},\n \"thinking\": {\n \"type\": \"{{ incoming.json.thinking.type|default('disabled') }}\",\n \"budget_tokens\": {{ incoming.json.thinking.budget_tokens|default(0) }}\n },\n \"anthropic_version\": \"{{ anthropic_version|default('2023-06-01') }}\"\n}"
},
"gemini_image": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent",
"headers": "{\"x-goog-api-key\":\"[[VAR:incoming.api_keys.key]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]]\n}"
}
},
"blocks": [

171
presets/testtesttt.json Normal file
View File

@@ -0,0 +1,171 @@
{
"id": "pipeline_editor",
"name": "Edited Pipeline",
"parallel_limit": 8,
"loop_mode": "iterative",
"loop_max_iters": 1000,
"loop_time_budget_ms": 999999999999,
"clear_var_store": true,
"http_timeout_sec": 999,
"text_extract_strategy": "auto",
"text_extract_json_path": "",
"text_join_sep": "\n",
"text_extract_presets": [
{
"id": "pmfqonx6fvcubc09k4ep",
"name": "candidates.0.content.parts.1.inlineData.data",
"strategy": "jsonpath",
"json_path": "candidates.0.content.parts.1.inlineData.data",
"join_sep": "\n"
},
{
"id": "pmfqrelw6wu9rutnzk1",
"name": "candidates.0.content.parts.1.inlineData",
"strategy": "jsonpath",
"json_path": "candidates.0.content.parts.1.inlineData",
"join_sep": "\n"
}
],
"nodes": [
{
"id": "n5",
"type": "SetVars",
"pos_x": -125,
"pos_y": 561,
"config": {
"variables": [
{
"id": "vmfi99ftc",
"name": "Clod",
"mode": "string",
"value": "igrovik"
},
{
"id": "vmfi99gjw",
"name": "MyOpenAiKey",
"mode": "string",
"value": "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"
},
{
"id": "vmfjkn09i",
"name": "NAMETest",
"mode": "expr",
"value": "128 + 64"
}
],
"_origId": "n5"
},
"in": {}
},
{
"id": "n2",
"type": "Return",
"pos_x": 954,
"pos_y": 564,
"config": {
"target_format": "auto",
"text_template": "[[OUT4]]",
"_origId": "n2"
},
"in": {
"depends": "n6.false"
}
},
{
"id": "n3",
"type": "RawForward",
"pos_x": 72,
"pos_y": 444,
"config": {
"passthrough_headers": true,
"extra_headers": "{\"connection\": \"close\"}",
"_origId": "n3"
},
"in": {
"depends": [
"n5.done",
"n1.true"
]
}
},
{
"id": "n1",
"type": "If",
"pos_x": 75,
"pos_y": 909,
"config": {
"expr": "([[OUT3]] contains \"Stream&#32;failed&#32;to\") || ([[OUT3]] contains \"gemini-2.5-pro\")",
"_origId": "n1"
},
"in": {
"depends": "n3.done"
}
},
{
"id": "n4",
"type": "ProviderCall",
"pos_x": 663,
"pos_y": 335,
"config": {
"provider": "gemini",
"provider_configs": {
"openai": {
"base_url": "https://api.openai.com",
"endpoint": "/v1/chat/completions",
"headers": "{\"Authorization\":\"Bearer [[VAR:incoming.headers.authorization]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"max_completion_tokens\": {{ incoming.json.max_completion_tokens|default(params.max_tokens|default(256)) }},\n \"presence_penalty\": {{ incoming.json.presence_penalty|default(0) }},\n \"frequency_penalty\": {{ incoming.json.frequency_penalty|default(0) }},\n \"stop\": {{ incoming.json.stop|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }}\n}"
},
"gemini": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent?key=[[VAR:incoming.api_keys.key]]",
"headers": "{}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"safetySettings\": {{ incoming.json.safetySettings|default([]) }},\n \"generationConfig\": {\n \"temperature\": {{ incoming.json.generationConfig.temperature|default(params.temperature|default(0.7)) }},\n \"topP\": {{ incoming.json.generationConfig.topP|default(params.top_p|default(1)) }},\n \"maxOutputTokens\": {{ incoming.json.generationConfig.maxOutputTokens|default(params.max_tokens|default(256)) }},\n \"stopSequences\": {{ incoming.json.generationConfig.stopSequences|default(params.stop|default([])) }},\n \"candidateCount\": {{ incoming.json.generationConfig.candidateCount|default(1) }},\n \"thinkingConfig\": {\n \"includeThoughts\": {{ incoming.json.generationConfig.thinkingConfig.includeThoughts|default(false) }},\n \"thinkingBudget\": {{ incoming.json.generationConfig.thinkingConfig.thinkingBudget|default(0) }}\n }\n }\n}"
},
"gemini_image": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent",
"headers": "{\"x-goog-api-key\":\"[[VAR:incoming.api_keys.key]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]]\n}"
},
"claude": {
"base_url": "https://api.anthropic.com",
"endpoint": "/v1/messages",
"headers": "{\"x-api-key\":\"[[VAR:incoming.headers.x-api-key]]\",\"anthropic-version\":\"2023-06-01\",\"anthropic-beta\":\"[[VAR:incoming.headers.anthropic-beta]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"stop_sequences\": {{ incoming.json.stop_sequences|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }},\n \"thinking\": {\n \"type\": \"{{ incoming.json.thinking.type|default('disabled') }}\",\n \"budget_tokens\": {{ incoming.json.thinking.budget_tokens|default(0) }}\n },\n \"anthropic_version\": \"{{ anthropic_version|default('2023-06-01') }}\"\n}"
}
},
"blocks": [
{
"id": "bmfwy94ev",
"name": "Твой ответ недостаточно хорош",
"role": "user",
"prompt": "Твой ответ:\n```\n[[OUT3]]\n```\nнедостаточно хорош, при его написании ты не следовал инструкциям. переделай исходя из инструкций, найди недостатк.",
"enabled": true,
"order": 0
}
],
"_origId": "n4",
"prompt_combine": "[[VAR:incoming.json.contents]] & [[PROMPT]]@pos=-1"
},
"in": {
"depends": [
"n6.true",
"n1.false"
]
}
},
{
"id": "n6",
"type": "If",
"pos_x": 675,
"pos_y": 882.25,
"config": {
"expr": "([[OUT4]] contains \"Stream&#32;failed&#32;to\") || ([[OUT4]] contains \"gemini-2.5-pro\")",
"_origId": "n6"
},
"in": {
"depends": "n4.done"
}
}
]
}

285
presets/tttttt.json Normal file
View File

@@ -0,0 +1,285 @@
{
"id": "pipeline_editor",
"name": "Edited Pipeline",
"parallel_limit": 8,
"loop_mode": "iterative",
"loop_max_iters": 1000,
"loop_time_budget_ms": 999999999999,
"clear_var_store": true,
"http_timeout_sec": 999,
"text_extract_strategy": "auto",
"text_extract_json_path": "",
"text_join_sep": "\n",
"text_extract_presets": [
{
"id": "pmfqonx6fvcubc09k4ep",
"name": "candidates.0.content.parts.1.inlineData.data",
"strategy": "jsonpath",
"json_path": "candidates.0.content.parts.1.inlineData.data",
"join_sep": "\n"
},
{
"id": "pmfqrelw6wu9rutnzk1",
"name": "candidates.0.content.parts.1.inlineData",
"strategy": "jsonpath",
"json_path": "candidates.0.content.parts.1.inlineData",
"join_sep": "\n"
}
],
"nodes": [
{
"id": "n5",
"type": "SetVars",
"pos_x": 12,
"pos_y": 780,
"config": {
"variables": [
{
"id": "vmfi99ftc",
"name": "Clod",
"mode": "string",
"value": "igrovik"
},
{
"id": "vmfi99gjw",
"name": "MyOpenAiKey",
"mode": "string",
"value": "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"
},
{
"id": "vmfjkn09i",
"name": "NAMETest",
"mode": "expr",
"value": "128 + 64"
}
],
"_origId": "n5"
},
"in": {}
},
{
"id": "n2",
"type": "Return",
"pos_x": 1344,
"pos_y": 756,
"config": {
"target_format": "auto",
"text_template": "[[OUT7]]",
"_origId": "n2"
},
"in": {
"depends": "n8.false"
}
},
{
"id": "n3",
"type": "RawForward",
"pos_x": 588,
"pos_y": 624,
"config": {
"passthrough_headers": true,
"extra_headers": "{\"connection\": \"close\"}",
"_origId": "n3"
},
"in": {
"depends": [
"n5.done",
"n1.true"
]
}
},
{
"id": "n1",
"type": "If",
"pos_x": 564,
"pos_y": 876,
"config": {
"expr": "([[OUT3]] contains \"Stream&#32;failed&#32;to\") || ([[OUT3]] contains \"gemini-2.5-pro\")",
"_origId": "n1"
},
"in": {
"depends": "n3.done"
}
},
{
"id": "n4",
"type": "ProviderCall",
"pos_x": 792,
"pos_y": 624,
"config": {
"provider": "openai",
"provider_configs": {
"openai": {
"base_url": "https://api.openai.com",
"endpoint": "/v1/chat/completions",
"headers": "{\"Authorization\":\"[[VAR:incoming.headers.authorization]]\"}",
"template": "{\n \"model\": \"gpt-5-chat-latest\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_completion_tokens\": {{ incoming.json.max_completion_tokens|default(params.max_tokens|default(256)) }},\n \"presence_penalty\": {{ incoming.json.presence_penalty|default(0) }},\n \"frequency_penalty\": {{ incoming.json.frequency_penalty|default(0) }},\n \"stop\": {{ incoming.json.stop|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }}\n}"
},
"gemini": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent?key=[[VAR:incoming.api_keys.key]]",
"headers": "{}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"safetySettings\": {{ incoming.json.safetySettings|default([]) }},\n \"generationConfig\": {\n \"temperature\": {{ incoming.json.generationConfig.temperature|default(params.temperature|default(0.7)) }},\n \"topP\": {{ incoming.json.generationConfig.topP|default(params.top_p|default(1)) }},\n \"maxOutputTokens\": {{ incoming.json.generationConfig.maxOutputTokens|default(params.max_tokens|default(256)) }},\n \"stopSequences\": {{ incoming.json.generationConfig.stopSequences|default(params.stop|default([])) }},\n \"candidateCount\": {{ incoming.json.generationConfig.candidateCount|default(1) }},\n \"thinkingConfig\": {\n \"includeThoughts\": {{ incoming.json.generationConfig.thinkingConfig.includeThoughts|default(false) }},\n \"thinkingBudget\": {{ incoming.json.generationConfig.thinkingConfig.thinkingBudget|default(0) }}\n }\n }\n}"
},
"gemini_image": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent",
"headers": "{\"x-goog-api-key\":\"[[VAR:incoming.api_keys.key]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]]\n}"
},
"claude": {
"base_url": "https://api.anthropic.com",
"endpoint": "/v1/messages",
"headers": "{\"x-api-key\":\"[[VAR:incoming.headers.x-api-key]]\",\"anthropic-version\":\"2023-06-01\",\"anthropic-beta\":\"[[VAR:incoming.headers.anthropic-beta]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"stop_sequences\": {{ incoming.json.stop_sequences|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }},\n \"thinking\": {\n \"type\": \"{{ incoming.json.thinking.type|default('disabled') }}\",\n \"budget_tokens\": {{ incoming.json.thinking.budget_tokens|default(0) }}\n },\n \"anthropic_version\": \"{{ anthropic_version|default('2023-06-01') }}\"\n}"
}
},
"blocks": [
{
"id": "bmfwy94ev",
"name": "Твой ответ недостаточно хорош",
"role": "user",
"prompt": "Твой ответ:\n```\n[[OUT3]]\n```\nнедостаточно хорош, при его написании ты не следовал инструкциям. переделай исходя из инструкций, найди недостатк1.",
"enabled": true,
"order": 0
}
],
"_origId": "n4",
"prompt_combine": "[[VAR:incoming.json.messages]] & [[PROMPT]]"
},
"in": {
"depends": [
"n6.true",
"n1.false"
]
}
},
{
"id": "n6",
"type": "If",
"pos_x": 792,
"pos_y": 876,
"config": {
"expr": "([[OUT4]] contains \"Stream&#32;failed&#32;to\") || ([[OUT4]] contains \"gemini-2.5-pro\")",
"_origId": "n6"
},
"in": {
"depends": "n4.done"
}
},
{
"id": "n7",
"type": "ProviderCall",
"pos_x": 1056,
"pos_y": 624,
"config": {
"provider": "claude",
"provider_configs": {
"openai": {
"base_url": "https://api.openai.com",
"endpoint": "/v1/chat/completions",
"headers": "{\"Authorization\":\"Bearer [[VAR:incoming.headers.authorization]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"max_completion_tokens\": {{ incoming.json.max_completion_tokens|default(params.max_tokens|default(256)) }},\n \"presence_penalty\": {{ incoming.json.presence_penalty|default(0) }},\n \"frequency_penalty\": {{ incoming.json.frequency_penalty|default(0) }},\n \"stop\": {{ incoming.json.stop|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }}\n}"
},
"gemini": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent?key=[[VAR:incoming.api_keys.key]]",
"headers": "{}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"safetySettings\": {{ incoming.json.safetySettings|default([]) }},\n \"generationConfig\": {\n \"temperature\": {{ incoming.json.generationConfig.temperature|default(params.temperature|default(0.7)) }},\n \"topP\": {{ incoming.json.generationConfig.topP|default(params.top_p|default(1)) }},\n \"maxOutputTokens\": {{ incoming.json.generationConfig.maxOutputTokens|default(params.max_tokens|default(256)) }},\n \"stopSequences\": {{ incoming.json.generationConfig.stopSequences|default(params.stop|default([])) }},\n \"candidateCount\": {{ incoming.json.generationConfig.candidateCount|default(1) }},\n \"thinkingConfig\": {\n \"includeThoughts\": {{ incoming.json.generationConfig.thinkingConfig.includeThoughts|default(false) }},\n \"thinkingBudget\": {{ incoming.json.generationConfig.thinkingConfig.thinkingBudget|default(0) }}\n }\n }\n}"
},
"gemini_image": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent",
"headers": "{\"x-goog-api-key\":\"[[VAR:incoming.api_keys.key]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]]\n}"
},
"claude": {
"base_url": "https://proxy.malepreg.lol/proxy/aws/claude",
"endpoint": "/v1/messages",
"headers": "{\"x-api-key\":\"igrovik\",\"anthropic-version\":\"2023-06-01\",\"anthropic-beta\":\"[[VAR:incoming.headers.anthropic-beta]]\"}",
"template": "{\n \"model\": \"claude-opus-4-20250514\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"stop_sequences\": {{ incoming.json.stop_sequences|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }},\n \"thinking\": {\n \"type\": \"{{ incoming.json.thinking.type|default('disabled') }}\",\n \"budget_tokens\": {{ incoming.json.thinking.budget_tokens|default(0) }}\n },\n \"anthropic_version\": \"{{ anthropic_version|default('2023-06-01') }}\"\n}"
}
},
"blocks": [
{
"id": "bmfzvzpl7",
"name": "Может содержать такие конструкции",
"role": "user",
"prompt": "Твой ответ:\n```\n[[OUT4]]\n```\nМожет содержать такие конструкции:\n**'Not X, but Y'** narrative structure. This includes any and all variations of stating what something *is not* in order to emphasize what it *is*. Нужно заменить места на нормальный нарратив.",
"enabled": true,
"order": 0
}
],
"_origId": "n7",
"prompt_combine": "[[VAR:incoming.json.messages]] & [[PROMPT]]",
"claude_no_system": true
},
"in": {
"depends": [
"n6.false",
"n8.true"
]
}
},
{
"id": "n8",
"type": "If",
"pos_x": 1068,
"pos_y": 876,
"config": {
"expr": "([[OUT7]] contains \"Stream failed to\") || ([[OUT7]] contains \"gemini-2.5-pro\")",
"_origId": "n8"
},
"in": {
"depends": "n7.done"
}
},
{
"id": "n9",
"type": "ProviderCall",
"pos_x": 1104,
"pos_y": 456,
"config": {
"provider": "claude",
"provider_configs": {
"openai": {
"base_url": "https://api.openai.com",
"endpoint": "/v1/chat/completions",
"headers": "{\"Authorization\":\"Bearer [[VAR:incoming.headers.authorization]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"max_completion_tokens\": {{ incoming.json.max_completion_tokens|default(params.max_tokens|default(256)) }},\n \"presence_penalty\": {{ incoming.json.presence_penalty|default(0) }},\n \"frequency_penalty\": {{ incoming.json.frequency_penalty|default(0) }},\n \"stop\": {{ incoming.json.stop|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }}\n}"
},
"gemini": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent?key=[[VAR:incoming.api_keys.key]]",
"headers": "{}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"safetySettings\": {{ incoming.json.safetySettings|default([]) }},\n \"generationConfig\": {\n \"temperature\": {{ incoming.json.generationConfig.temperature|default(params.temperature|default(0.7)) }},\n \"topP\": {{ incoming.json.generationConfig.topP|default(params.top_p|default(1)) }},\n \"maxOutputTokens\": {{ incoming.json.generationConfig.maxOutputTokens|default(params.max_tokens|default(256)) }},\n \"stopSequences\": {{ incoming.json.generationConfig.stopSequences|default(params.stop|default([])) }},\n \"candidateCount\": {{ incoming.json.generationConfig.candidateCount|default(1) }},\n \"thinkingConfig\": {\n \"includeThoughts\": {{ incoming.json.generationConfig.thinkingConfig.includeThoughts|default(false) }},\n \"thinkingBudget\": {{ incoming.json.generationConfig.thinkingConfig.thinkingBudget|default(0) }}\n }\n }\n}"
},
"gemini_image": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent",
"headers": "{\"x-goog-api-key\":\"[[VAR:incoming.api_keys.key]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]]\n}"
},
"claude": {
"base_url": "https://proxy.malepreg.lol/proxy/aws/claude",
"endpoint": "/v1/messages",
"headers": "{\"x-api-key\":\"igrovik\",\"anthropic-version\":\"2023-06-01\",\"anthropic-beta\":\"[[VAR:incoming.headers.anthropic-beta]]\"}",
"template": "{\n \"model\": \"claude-opus-4-20250514\",\n [[PROMPT]],\n \"top_p\": 1,\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"stop_sequences\": {{ incoming.json.stop_sequences|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }},\n \"thinking\": {\n \"type\": \"{{ incoming.json.thinking.type|default('disabled') }}\",\n \"budget_tokens\": {{ incoming.json.thinking.budget_tokens|default(0) }}\n },\n \"anthropic_version\": \"{{ anthropic_version|default('2023-06-01') }}\"\n}"
}
},
"blocks": [
{
"id": "bmg26nusx",
"name": "New Block",
"role": "user",
"prompt": "Hey",
"enabled": true,
"order": 0
}
],
"_origId": "n9"
},
"in": {}
}
]
}

247
presets/tttttt1.json Normal file
View File

@@ -0,0 +1,247 @@
{
"id": "pipeline_editor",
"name": "Edited Pipeline",
"parallel_limit": 8,
"loop_mode": "iterative",
"loop_max_iters": 1000,
"loop_time_budget_ms": 999999999999,
"clear_var_store": true,
"http_timeout_sec": 999,
"text_extract_strategy": "auto",
"text_extract_json_path": "",
"text_join_sep": "\n",
"text_extract_presets": [
{
"id": "pmfqonx6fvcubc09k4ep",
"name": "candidates.0.content.parts.1.inlineData.data",
"strategy": "jsonpath",
"json_path": "candidates.0.content.parts.1.inlineData.data",
"join_sep": "\n"
},
{
"id": "pmfqrelw6wu9rutnzk1",
"name": "candidates.0.content.parts.1.inlineData",
"strategy": "jsonpath",
"json_path": "candidates.0.content.parts.1.inlineData",
"join_sep": "\n"
}
],
"nodes": [
{
"id": "n5",
"type": "SetVars",
"pos_x": 12,
"pos_y": 780,
"config": {
"variables": [
{
"id": "vmfi99ftc",
"name": "Clod",
"mode": "string",
"value": "igrovik"
},
{
"id": "vmfi99gjw",
"name": "MyOpenAiKey",
"mode": "string",
"value": "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"
},
{
"id": "vmfjkn09i",
"name": "NAMETest",
"mode": "expr",
"value": "128 + 64"
}
],
"_origId": "n5"
},
"in": {}
},
{
"id": "n2",
"type": "Return",
"pos_x": 1344,
"pos_y": 756,
"config": {
"target_format": "auto",
"text_template": "[[OUT7]]",
"_origId": "n2"
},
"in": {
"depends": "n8.false"
}
},
{
"id": "n3",
"type": "RawForward",
"pos_x": 588,
"pos_y": 624,
"config": {
"passthrough_headers": true,
"extra_headers": "{\"connection\": \"close\"}",
"_origId": "n3",
"while_expr": "([[OUT3]] contains \"Stream failed to\") || ([[OUT3]] contains \"gemini-2.5-pro\")",
"ignore_errors": false,
"while_max_iters": 50
},
"in": {
"depends": [
"n5.done",
"n1.true"
]
}
},
{
"id": "n1",
"type": "If",
"pos_x": 600,
"pos_y": 876,
"config": {
"expr": "([[OUT3]] contains \"Stream&#32;failed&#32;to\") || ([[OUT3]] contains \"gemini-2.5-pro\")",
"_origId": "n1"
},
"in": {
"depends": "n3.done"
}
},
{
"id": "n4",
"type": "ProviderCall",
"pos_x": 792,
"pos_y": 624,
"config": {
"provider": "gemini",
"provider_configs": {
"openai": {
"base_url": "https://api.openai.com",
"endpoint": "/v1/chat/completions",
"headers": "{\"Authorization\":\"[[VAR:incoming.headers.authorization]]\"}",
"template": "{\n \"model\": \"gpt-5-chat-latest\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_completion_tokens\": {{ incoming.json.max_completion_tokens|default(params.max_tokens|default(256)) }},\n \"presence_penalty\": {{ incoming.json.presence_penalty|default(0) }},\n \"frequency_penalty\": {{ incoming.json.frequency_penalty|default(0) }},\n \"stop\": {{ incoming.json.stop|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }}\n}"
},
"gemini": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent?key=[[VAR:incoming.api_keys.key]]",
"headers": "{}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"safetySettings\": {{ incoming.json.safetySettings|default([]) }},\n \"generationConfig\": {\n \"temperature\": {{ incoming.json.generationConfig.temperature|default(params.temperature|default(0.7)) }},\n \"topP\": {{ incoming.json.generationConfig.topP|default(params.top_p|default(1)) }},\n \"maxOutputTokens\": {{ incoming.json.generationConfig.maxOutputTokens|default(params.max_tokens|default(256)) }},\n \"stopSequences\": {{ incoming.json.generationConfig.stopSequences|default(params.stop|default([])) }},\n \"candidateCount\": {{ incoming.json.generationConfig.candidateCount|default(1) }},\n \"thinkingConfig\": {\n \"includeThoughts\": {{ incoming.json.generationConfig.thinkingConfig.includeThoughts|default(false) }},\n \"thinkingBudget\": {{ incoming.json.generationConfig.thinkingConfig.thinkingBudget|default(0) }}\n }\n }\n}"
},
"gemini_image": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent",
"headers": "{\"x-goog-api-key\":\"[[VAR:incoming.api_keys.key]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]]\n}"
},
"claude": {
"base_url": "https://api.anthropic.com",
"endpoint": "/v1/messages",
"headers": "{\"x-api-key\":\"[[VAR:incoming.headers.x-api-key]]\",\"anthropic-version\":\"2023-06-01\",\"anthropic-beta\":\"[[VAR:incoming.headers.anthropic-beta]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"stop_sequences\": {{ incoming.json.stop_sequences|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }},\n \"thinking\": {\n \"type\": \"{{ incoming.json.thinking.type|default('disabled') }}\",\n \"budget_tokens\": {{ incoming.json.thinking.budget_tokens|default(0) }}\n },\n \"anthropic_version\": \"{{ anthropic_version|default('2023-06-01') }}\"\n}"
}
},
"blocks": [
{
"id": "bmfwy94ev",
"name": "Твой ответ недостаточно хорош",
"role": "user",
"prompt": "Твой ответ:\n```\n[[OUT3]]\n```\nнедостаточно хорош, при его написании ты не следовал инструкциям. переделай исходя из инструкций, найди недостатки разобрав каждое действие оценив его логичность и следование истории от 0до10, перепиши эти моменты на нормальные.",
"enabled": true,
"order": 0
}
],
"_origId": "n4",
"prompt_combine": "[[VAR:incoming.json.contents]] & [[PROMPT]]@pos=-1",
"while_expr": "([[OUT3]] contains \"Stream failed to\") || ([[OUT3]] contains \"gemini-2.5-pro\")",
"ignore_errors": false,
"while_max_iters": 50
},
"in": {
"depends": [
"n6.true",
"n1.false"
]
}
},
{
"id": "n6",
"type": "If",
"pos_x": 852,
"pos_y": 960,
"config": {
"expr": "([[OUT4]] contains \"Stream&#32;failed&#32;to\") || ([[OUT4]] contains \"gemini-2.5-pro\")",
"_origId": "n6"
},
"in": {
"depends": "n4.done"
}
},
{
"id": "n7",
"type": "ProviderCall",
"pos_x": 1080,
"pos_y": 624,
"config": {
"provider": "gemini",
"provider_configs": {
"openai": {
"base_url": "https://api.openai.com",
"endpoint": "/v1/chat/completions",
"headers": "{\"Authorization\":\"Bearer [[MyOpenAiKey]]\"}",
"template": "{\n \"model\": \"gpt-5-chat-latest\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_completion_tokens\": {{ incoming.json.max_completion_tokens|default(params.max_tokens|default(256)) }},\n \"presence_penalty\": {{ incoming.json.presence_penalty|default(0) }},\n \"frequency_penalty\": {{ incoming.json.frequency_penalty|default(0) }},\n \"stop\": {{ incoming.json.stop|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }}\n}"
},
"gemini": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent?key=[[VAR:incoming.api_keys.key]]",
"headers": "{}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"safetySettings\": {{ incoming.json.safetySettings|default([]) }},\n \"generationConfig\": {\n \"temperature\": {{ incoming.json.generationConfig.temperature|default(params.temperature|default(0.7)) }},\n \"topP\": {{ incoming.json.generationConfig.topP|default(params.top_p|default(1)) }},\n \"maxOutputTokens\": {{ incoming.json.generationConfig.maxOutputTokens|default(params.max_tokens|default(256)) }},\n \"stopSequences\": {{ incoming.json.generationConfig.stopSequences|default(params.stop|default([])) }},\n \"candidateCount\": {{ incoming.json.generationConfig.candidateCount|default(1) }},\n \"thinkingConfig\": {\n \"includeThoughts\": {{ incoming.json.generationConfig.thinkingConfig.includeThoughts|default(false) }},\n \"thinkingBudget\": {{ incoming.json.generationConfig.thinkingConfig.thinkingBudget|default(0) }}\n }\n }\n}"
},
"gemini_image": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent",
"headers": "{\"x-goog-api-key\":\"[[VAR:incoming.api_keys.key]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]]\n}"
},
"claude": {
"base_url": "https://proxy.malepreg.lol/proxy/aws/claude",
"endpoint": "/v1/messages",
"headers": "{\"x-api-key\":\"igrovik\",\"anthropic-version\":\"2023-06-01\",\"anthropic-beta\":\"[[VAR:incoming.headers.anthropic-beta]]\"}",
"template": "{\n \"model\": \"claude-opus-4-20250514\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"stop_sequences\": {{ incoming.json.stop_sequences|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }},\n \"thinking\": {\n \"type\": \"{{ incoming.json.thinking.type|default('disabled') }}\",\n \"budget_tokens\": {{ incoming.json.thinking.budget_tokens|default(0) }}\n },\n \"anthropic_version\": \"{{ anthropic_version|default('2023-06-01') }}\"\n}"
}
},
"blocks": [
{
"id": "bmfzvzpl7",
"name": "Может содержать такие конструкции",
"role": "user",
"prompt": "Твой ответ:\n```\n[[OUT4]]\n```\nМожет содержать такие конструкции:\n**'Not X, but Y'** narrative structure. This includes any and all variations of stating what something *is not* in order to emphasize what it *is*. Нужно заменить места на нормальный нарратив.",
"enabled": true,
"order": 0
}
],
"_origId": "n7",
"prompt_combine": "[[VAR:incoming.json.contents]] & [[PROMPT]]@pos=-1",
"claude_no_system": true,
"while_expr": "([[OUT3]] contains \"Stream failed to\") || ([[OUT3]] contains \"gemini-2.5-pro\")",
"ignore_errors": false,
"while_max_iters": 50
},
"in": {
"depends": [
"n6.false",
"n8.true"
]
}
},
{
"id": "n8",
"type": "If",
"pos_x": 1068,
"pos_y": 876,
"config": {
"expr": "([[OUT7]] contains \"Stream failed to\") || ([[OUT7]] contains \"gemini-2.5-pro\")",
"_origId": "n8"
},
"in": {
"depends": "n7.done"
}
}
]
}

View File

@@ -1,8 +1,8 @@
fastapi==0.112.2
fastapi==0.115.2
uvicorn==0.30.6
pydantic==2.8.2
httpx==0.27.0
starlette==0.38.2
httpx[socks]==0.27.0
starlette==0.40.0
brotlicffi
brotli

View File

@@ -1,27 +1,51 @@
@echo off
setlocal
chcp 65001 >NUL
set PORT=7860
echo Installing dependencies...
python -m pip install --upgrade pip
REM -------- Config --------
if "%PORT%"=="" set PORT=7860
if "%HOST%"=="" set HOST=127.0.0.1
REM ------------------------
echo [НадTavern] Preparing virtual environment...
REM Pick Python launcher
where py >NUL 2>&1
if %ERRORLEVEL%==0 (
set PY=py
) else (
set PY=python
)
REM Create venv if missing
if not exist ".venv\Scripts\python.exe" (
%PY% -m venv .venv
if errorlevel 1 goto :fail
)
set "VENV_PY=.venv\Scripts\python.exe"
echo [НадTavern] Upgrading pip...
"%VENV_PY%" -m pip install --upgrade pip
if errorlevel 1 goto :fail
pip install -r requirements.txt
echo [НадTavern] Installing dependencies from requirements.txt...
"%VENV_PY%" -m pip install -r requirements.txt
if errorlevel 1 goto :fail
echo Starting НадTavern on http://127.0.0.1:%PORT%/
echo [НадTavern] Starting on http://%HOST%:%PORT%/
timeout /t 1 /nobreak >NUL
start "" "http://127.0.0.1:%PORT%/ui/editor.html"
python -m uvicorn agentui.api.server:app --host 127.0.0.1 --port %PORT% --log-level info
start "" "http://%HOST%:%PORT%/ui/editor.html"
"%VENV_PY%" -m uvicorn agentui.api.server:app --host %HOST% --port %PORT% --log-level info
if errorlevel 1 goto :fail
goto :end
:fail
echo.
echo Server failed with errorlevel %errorlevel%.
echo [НадTavern] Server failed with errorlevel %errorlevel%.
echo Check the console output above and the file agentui.log for details.
pause
:end
pause
endlocal

View File

@@ -1,10 +1,11 @@
#!/usr/bin/env bash
set -euo pipefail
# НадTavern Linux launcher
# НадTavern Linux/macOS launcher with local .venv bootstrap
# Usage:
# chmod +x ./run_agentui.sh
# ./run_agentui.sh
# Optional env: HOST=0.0.0.0 PORT=7860
# Go to repo root (script location)
cd "$(dirname "$0")"
@@ -12,18 +13,28 @@ cd "$(dirname "$0")"
PORT="${PORT:-7860}"
HOST="${HOST:-127.0.0.1}"
echo "Installing dependencies..."
# Pick python
if command -v python3 >/dev/null 2>&1; then
PY=python3
else
PY=python
fi
"$PY" -m pip install --upgrade pip
"$PY" -m pip install -r requirements.txt
# Create venv if missing
if [ ! -f ".venv/bin/python" ]; then
echo "[НадTavern] Creating .venv ..."
"$PY" -m venv .venv
fi
echo "Starting НадTavern on http://$HOST:$PORT/"
VENV_PY=".venv/bin/python"
echo "[НадTavern] Upgrading pip ..."
"$VENV_PY" -m pip install --upgrade pip
echo "[НадTavern] Installing deps from requirements.txt ..."
"$VENV_PY" -m pip install -r requirements.txt
echo "[НадTavern] Starting on http://$HOST:$PORT/"
# Try to open UI editor in default browser (non-fatal if fails)
if command -v xdg-open >/dev/null 2>&1; then
@@ -32,4 +43,4 @@ elif command -v open >/dev/null 2>&1; then
open "http://$HOST:$PORT/ui/editor.html" >/dev/null 2>&1 || true
fi
exec "$PY" -m uvicorn agentui.api.server:app --host "$HOST" --port "$PORT" --log-level info
exec "$VENV_PY" -m uvicorn agentui.api.server:app --host "$HOST" --port "$PORT" --log-level info

View File

@@ -15,31 +15,38 @@ html, body, button, input, select, textarea, code, pre, a, .chip-btn, .group-tit
--node: #0e1116;
--node-border: #334155;
--node-selected: #1f2937;
/* Default base wire color */
--connector: #7aa2f7;
--connector-muted: #3b82f6;
/* Muted colors for the different node types/branches */
--wire-true: #34d399; /* soft green for If:true */
--wire-false: #94a3b8; /* slate gray for If:false */
--wire-provider: #5b86e5; /* muted blue */
--wire-raw: #8b7de6; /* soft violet */
--wire-setvars: #4fbfa0; /* muted mint */
--wire-return: #93a9d1; /* cool gray-blue */
/* DRY tokens: unified shadows and transitions */
--ring3-22-shadow: 0 0 0 3px rgba(96,165,250,.22), 0 4px 10px rgba(0,0,0,.35);
--ring3-20-shadow: 0 0 0 3px rgba(96,165,250,.20), 0 4px 10px rgba(0,0,0,.35);
--ring2-20-shadow: 0 0 0 2px rgba(96,165,250,.20), 0 2px 6px rgba(0,0,0,.35);
--focus-ring3-20: 0 0 0 3px rgba(96,165,250,.20);
--focus-ring3-22: 0 0 0 3px rgba(96,165,250,.22);
--tr-base: border-color .12s ease, box-shadow .12s ease, background-color .12s ease, color .12s ease;
--tr-pop: transform .12s ease;
--tr-pop-fast: transform .08s ease;
}
html, body {
height: 100%;
overflow: hidden; /* remove the page-wide scrollbars so they don't cover the right arrow */
}
/* Global containers and scrolling */
html, body {
height: 100%;
overflow: hidden; /* remove the page-wide scrollbars */
}
#container {
position: relative; /* anchor for the absolutely-positioned toggle arrows */
}
/* Grid areas to hard-pin layout regardless of hidden panels or absolute children */
#container {
display: grid;
@@ -77,7 +84,36 @@ html, body {
border: 1px solid var(--node-border);
color: #e5e7eb;
border-radius: 12px 12px 0 0;
padding: 6px 10px;
padding: 4px 8px; /* more compact title */
font-size: 12px; /* smaller title font */
line-height: 1.2;
}
/* Node-type icon in the title (monochrome, calm) */
.drawflow .drawflow-node .title-box .node-ico {
display: inline-block;
width: 14px;
height: 14px;
margin-right: 6px;
vertical-align: -2px;
background-size: 14px 14px;
background-repeat: no-repeat;
filter: opacity(.9);
}
/* Per-type SVG icons (colors match the project style) */
.drawflow .drawflow-node .title-box .node-ico-If {
background-image: url("data:image/svg+xml;utf8,<svg xmlns='http://www.w3.org/2000/svg' width='14' height='14' viewBox='0 0 24 24' fill='none' stroke='%2394a3b8' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'><path d='M6 4v6a4 4 0 0 0 4 4h4'/><polyline points='14 14 18 10 14 6'/></svg>");
}
.drawflow .drawflow-node .title-box .node-ico-ProviderCall {
background-image: url("data:image/svg+xml;utf8,<svg xmlns='http://www.w3.org/2000/svg' width='14' height='14' viewBox='0 0 24 24' fill='none' stroke='%235b86e5' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'><path d='M3 15a4 4 0 0 0 4 4h10a4 4 0 0 0 4-4'/><path d='M7 19V5a4 4 0 0 1 4-4h2a4 4 0 0 1 4 4v14'/></svg>");
}
.drawflow .drawflow-node .title-box .node-ico-RawForward {
background-image: url("data:image/svg+xml;utf8,<svg xmlns='http://www.w3.org/2000/svg' width='14' height='14' viewBox='0 0 24 24' fill='none' stroke='%238b7de6' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'><polyline points='5 12 19 12'/><polyline points='12 5 19 12 12 19'/></svg>");
}
.drawflow .drawflow-node .title-box .node-ico-SetVars {
background-image: url("data:image/svg+xml;utf8,<svg xmlns='http://www.w3.org/2000/svg' width='14' height='14' viewBox='0 0 24 24' fill='none' stroke='%234fbfa0' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'><circle cx='12' cy='12' r='3'/><path d='M19.4 15a1.65 1.65 0 0 0 .33 1.82l.06.06a2 2 0 1 1-2.83 2.83l-.06-.06A1.65 1.65 0 0 0 15 19.4a1.65 1.65 0 0 0-1 .6l-.09.1a2 2 0 1 1-3.2 0l-.09-.1a1.65 1.65 0 0 0-1-.6 1.65 1.65 0 0 0-1.82.33l-.06.06a2 2 0 1 1-2.83-2.83l.06-.06A1.65 1.65 0 0 0 4.6 15a1.65 1.65 0 0 0-.6-1l-.1-.09a2 2 0 1 1 0-3.2l.1-.09a1.65 1.65 0 0 0 .6-1 1.65 1.65 0 0 0-.33-1.82l-.06-.06A2 2 0 1 1 6.94 2.6l.06.06A1.65 1.65 0 0 0 8 3.6a1.65 1.65 0 0 0 1-.6l.09-.1a2 2 0 1 1 3.2 0l.09.1a1.65 1.65 0 0 0 1 .6 1.65 1.65 0 0 0 1.82-.33l.06-.06a2 2 0 1 1 2.83 2.83l-.06.06a1.65 1.65 0 0 0-.33 1.82 1.65 1.65 0 0 0 .6 1l.1.09a2 2 0 1 1 0 3.2l-.1.09a1.65 1.65 0 0 0-.6 1z'/></svg>");
}
.drawflow .drawflow-node .title-box .node-ico-Return {
background-image: url("data:image/svg+xml;utf8,<svg xmlns='http://www.w3.org/2000/svg' width='14' height='14' viewBox='0 0 24 24' fill='none' stroke='%2393a9d1' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'><path d='M9 10l-5 5 5 5'/><path d='M20 4v7a4 4 0 0 1-4 4H4'/></svg>");
}
.drawflow .drawflow-node .box {
@@ -87,11 +123,28 @@ html, body {
color: #e5e7eb;
border-radius: 0 0 12px 12px;
overflow: hidden; /* keep content from spilling past the edges */
font-size: 11px; /* more compact content */
line-height: 1.25;
}
/* Preview content inside .box: can be hidden at low LOD without breaking node geometry */
/* Preview content inside .box: can be hidden at low LOD without changing the node box */
.drawflow .drawflow-node .node-preview {
pointer-events: none;
pointer-events: auto; /* allow clicks on summary (<details>) */
opacity: .85;
font-size: 10.5px; /* small overall preview text */
}
/* On the canvas itself, preview fields can't be edited or clicked */
.drawflow .drawflow-node .node-preview input,
.drawflow .drawflow-node .node-preview textarea {
pointer-events: none;
}
.drawflow .drawflow-node .node-preview label {
font-size: 10px;
margin: 4px 0 2px;
}
/* Address fields stay readable at a "regular" size */
.drawflow .drawflow-node .node-preview .np-url,
.drawflow .drawflow-node .node-preview .np-endpoint {
font-size: 12px !important;
}
.drawflow .drawflow-node .box textarea,
@@ -104,6 +157,9 @@ html, body {
width: 100%;
max-width: 100%;
box-sizing: border-box;
padding: 6px 8px; /* more compact preview fields */
font-size: 10.5px; /* small text by default */
resize: none; /* no resizing on the canvas */
}
.df-node .box textarea {
@@ -112,6 +168,7 @@ html, body {
overflow-y: auto; /* vertical scroll only when needed */
overflow-x: hidden; /* no horizontal scroll inside the textarea */
max-height: 180px; /* prevent unbounded height */
resize: none; /* no manual resizing of the preview */
}
/* Soft highlight for the selected node */
@@ -120,6 +177,15 @@ html, body {
border-color: var(--accent);
box-shadow: 0 0 0 1px color-mix(in srgb, var(--accent) 40%, transparent);
}
/* Make disabled fields look like regular previews (no browser "graying") */
.drawflow .drawflow-node .box input[disabled],
.drawflow .drawflow-node .box textarea[disabled] {
opacity: 1;
color: #e5e7eb;
background: #0f141a;
border-color: #2b3646;
cursor: default;
}
/* Ports: neater, without the "orange" */
.drawflow .drawflow-node .inputs .input,
@@ -131,19 +197,36 @@ html, body {
box-shadow: 0 0 0 2px rgba(0,0,0,.25);
}
/* Connection lines: smooth, tidy colors */
/* Connection lines: thinner and calmer */
.drawflow .connection .main-path {
stroke: var(--connector) !important;
/* Line width scales with zoom (var(--zoom) is set on #canvas from JS) */
stroke-width: clamp(1.6px, calc(3px / var(--zoom, 1)), 6px) !important;
opacity: 0.95 !important;
stroke-width: clamp(1px, calc(2.2px / var(--zoom, 1)), 4.5px) !important;
opacity: 0.9 !important;
stroke-linecap: round; /* rounded line caps */
stroke-linejoin: round; /* rounded joins */
}
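/* Worked example for the clamp() above (assuming var(--zoom) is the canvas scale factor):
   at zoom 1 the stroke is 2.2px; at zoom 0.25 it is 2.2/0.25 = 8.8px, capped to 4.5px;
   at zoom 4 it is 2.2/4 = 0.55px, floored to 1px, so wires stay legible at any zoom. */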
/* Connection styling classes (set by JS; stable even if Drawflow re-renders paths) */
.drawflow .connection.conn-if-true .main-path {
stroke: var(--wire-true) !important;
stroke-dasharray: 6 6 !important;
}
.drawflow .connection.conn-if-false .main-path {
stroke: var(--wire-false) !important;
stroke-dasharray: 6 6 !important;
}
.drawflow .connection.conn-provider .main-path { stroke: var(--wire-provider) !important; }
.drawflow .connection.conn-raw .main-path { stroke: var(--wire-raw) !important; }
.drawflow .connection.conn-setvars .main-path { stroke: var(--wire-setvars) !important; }
.drawflow .connection.conn-return .main-path { stroke: var(--wire-return) !important; }
/* Highlight edges incoming to a failed node (soft red) */
.drawflow .connection.conn-upstream-err .main-path { stroke: #ef4444 !important; opacity: .95 !important; }
.drawflow .connection .main-path.selected,
.drawflow .connection:hover .main-path {
stroke: var(--accent-2) !important;
/* On hover/selected: a bit thicker than the base formula */
stroke-width: clamp(2px, calc(3.6px / var(--zoom, 1)), 7px) !important;
/* On hover/selected: slightly thicker than the base formula */
stroke-width: clamp(1.3px, calc(2.6px / var(--zoom, 1)), 5px) !important;
}
/* Bend points / handles */
@@ -268,19 +351,19 @@ a.chip-btn {
color: #e5e7eb;
border: 1px solid #334155;
box-shadow: 0 2px 6px rgba(0,0,0,.35);
transition: transform .12s ease, box-shadow .12s ease, background-color .12s ease, border-color .12s ease, color .12s ease;
transition: var(--tr-base), var(--tr-pop);
user-select: none;
}
.chip-btn:hover,
a.chip-btn:hover {
background: #1f2937;
border-color: var(--accent-2);
box-shadow: 0 0 0 3px rgba(96,165,250,.22), 0 4px 10px rgba(0,0,0,.35);
box-shadow: var(--ring3-22-shadow);
}
.chip-btn:active,
a.chip-btn:active {
transform: translateY(1px);
box-shadow: 0 0 0 2px rgba(96,165,250,.20), 0 2px 6px rgba(0,0,0,.35);
box-shadow: var(--ring2-20-shadow);
}
/* Header inputs and selects sit in the same visual row as the chips */
@@ -297,7 +380,7 @@ a.chip-btn:active {
}
.top-input:focus {
border-color: var(--accent-2);
box-shadow: 0 0 0 3px rgba(96,165,250,.20);
box-shadow: var(--focus-ring3-20);
}
/* Inner headings inside a node block */
@@ -322,7 +405,7 @@ a.chip-btn:active {
box-shadow: 0 2px 6px rgba(0,0,0,.35);
cursor: pointer;
opacity: .85;
transition: transform .12s ease, opacity .12s ease, box-shadow .12s ease, border-color .12s ease, background-color .12s ease;
transition: var(--tr-base), var(--tr-pop), opacity .12s ease;
}
.drawflow .connection:hover foreignObject,
.drawflow .connection:hover [class*="remove"],
@@ -331,7 +414,7 @@ a.chip-btn:active {
opacity: 1;
transform: scale(1.05);
border-color: var(--accent-2);
box-shadow: 0 0 0 3px rgba(96,165,250,.20), 0 4px 10px rgba(0,0,0,.35);
box-shadow: var(--ring3-20-shadow);
}
/* If delete control is rendered inside foreignObject, normalize inner box */
.drawflow .connection foreignObject div,
@@ -685,7 +768,7 @@ a.chip-btn:active {
/* Port hover affordance (no heavy effects) */
.drawflow .drawflow-node .inputs .input,
.drawflow .drawflow-node .outputs .output {
transition: transform .08s ease;
transition: var(--tr-pop-fast);
will-change: transform;
}
.drawflow .drawflow-node .inputs .input:hover,
@@ -712,18 +795,18 @@ a.chip-btn:active {
box-shadow: 0 2px 6px rgba(0,0,0,.35) !important;
cursor: pointer !important;
z-index: 10 !important;
transition: transform .12s ease, box-shadow .12s ease, background-color .12s ease, border-color .12s ease, color .12s ease !important;
transition: var(--tr-base), var(--tr-pop) !important;
}
.drawflow .drawflow-node .close:hover {
transform: scale(1.06) !important;
background: #1f2937 !important;
border-color: var(--accent-2) !important;
color: #f8fafc !important;
box-shadow: 0 0 0 3px rgba(96,165,250,.22), 0 4px 10px rgba(0,0,0,.35) !important;
box-shadow: var(--ring3-22-shadow) !important;
}
.drawflow .drawflow-node .close:active {
transform: scale(0.98) !important;
box-shadow: 0 0 0 2px rgba(96,165,250,.20), 0 2px 6px rgba(0,0,0,.35) !important;
box-shadow: var(--ring2-20-shadow) !important;
}
/* Drawflow floating delete handle (class: .drawflow-delete) — restyle but keep behavior */
#drawflow .drawflow-delete,
@@ -741,7 +824,7 @@ a.chip-btn:active {
box-shadow: 0 2px 6px rgba(0,0,0,.35) !important;
cursor: pointer !important;
z-index: 1000 !important;
transition: transform .12s ease, box-shadow .12s ease, background-color .12s ease, border-color .12s ease !important;
transition: var(--tr-base), var(--tr-pop) !important;
}
#drawflow .drawflow-delete::before,
.drawflow-delete::before {
@@ -757,7 +840,7 @@ a.chip-btn:active {
transform: translate(-50%, -50%) scale(1.06) !important;
background: #1f2937 !important;
border-color: var(--accent-2) !important;
box-shadow: 0 0 0 3px rgba(96,165,250,.22), 0 4px 10px rgba(0,0,0,.35) !important;
box-shadow: var(--ring3-22-shadow) !important;
}
#drawflow .drawflow-delete:active,
.drawflow-delete:active {
@@ -766,7 +849,7 @@ a.chip-btn:active {
/* Execution highlight states (SSE-driven) */
.drawflow .drawflow-node .title-box,
.drawflow .drawflow-node .box {
transition: border-color .12s ease, box-shadow .12s ease, background-color .12s ease;
transition: var(--tr-base);
}
.drawflow .drawflow-node.node-running .title-box,
@@ -808,16 +891,6 @@ a.chip-btn:active {
transform: translate(-50%, -100%);
z-index: 1000; /* above nodes/edges but below menus */
}
/* Remove scrollbars from the Drawflow container so they don't cover the right arrow */
#drawflow {
overflow: hidden !important;
position: relative;
z-index: 1; /* keep the canvas visible under the HUD and above the background */
/* Stretch the Drawflow container across the whole center column */
width: 100%;
height: 100%;
display: block;
}
/* Panels collapse controls and layout */
#container.collapse-left {
@@ -1002,11 +1075,7 @@ select#pm-role {
outline: none;
font: 12px/1 Inter, system-ui, Arial, sans-serif;
transition:
border-color .12s ease,
box-shadow .12s ease,
background-color .12s ease,
color .12s ease;
transition: var(--tr-base);
}
/* Hover and focus states consistent with .top-input */
@@ -1025,7 +1094,7 @@ select#vars-scope:focus,
select.v-mode:focus,
select#pm-role:focus {
border-color: var(--accent-2);
box-shadow: 0 0 0 3px rgba(96,165,250,.20);
box-shadow: var(--focus-ring3-20);
}
/* Compact width contexts: keep natural width unless container forces 100% */
@@ -1202,4 +1271,196 @@ header { position: relative; }
}
#inspector .var-row .v-del {
flex: 0 0 auto;
}
}
/* --- Wire labels and arrows overlay --- */
#wire-labels {
position: absolute;
inset: 0;
pointer-events: none;
z-index: 4; /* above the wires, below the panels */
}
.wire-label {
position: absolute;
transform: translate(-50%, -50%);
background: #10151c;
color: #e5e7eb;
border: 1px solid rgba(148,163,184,.35);
border-radius: 6px;
padding: 1px 4px;
font: 10px/1.2 Inter, system-ui, Arial, sans-serif;
white-space: nowrap;
opacity: .9;
user-select: none;
}
.wire-arrow {
position: absolute;
width: 0;
height: 0;
border-left: 6px solid transparent;
border-right: 6px solid transparent;
border-top: 8px solid var(--connector); /* recolored inline from the wire color */
transform-origin: 50% 70%;
opacity: .95;
}
/* Dim unrelated connections while a node is focused */
.drawflow .connection.dim .main-path {
opacity: .35 !important;
}
/* --- Collapsible preview blocks inside nodes --- */
.np-coll { margin: 4px 0; }
.np-coll > summary {
list-style: none;
cursor: pointer;
color: var(--muted);
font-size: 10px;
margin: 4px 0 2px;
}
.np-coll > summary::-webkit-details-marker { display: none; }
.np-coll[open] > summary { color: #cbd5e1; }
/* groups overlay removed */
/* --- Canvas preview sanitization: hide hints/labels/checkboxes (only on canvas node previews) --- */
/* Hide visual hints, labels and checkmarks only inside node previews on the canvas.
Summary sections (headers/template) stay visible; textarea/inputs keep showing their values. */
#canvas .drawflow .drawflow-node .node-preview .hint,
#canvas .drawflow .drawflow-node .node-preview label,
#canvas .drawflow .drawflow-node .node-preview input[type="checkbox"] {
display: none !important;
}
/* --- Unified checkbox style across UI --- */
/* Unified dark checkbox style matching the project theme (accent: var(--accent-2)).
Applied across the whole UI (inspector, "Запуск" (Run) panel, Prompt Blocks, STORE panel, etc.).
In canvas previews, checkboxes are hidden by the block above. */
input[type="checkbox"] {
-webkit-appearance: none;
appearance: none;
width: 16px;
height: 16px;
display: inline-block;
vertical-align: -2px;
border: 1px solid #334155;
border-radius: 4px;
background: #0f141a;
box-shadow: 0 0 0 0 rgba(96,165,250,0.0);
transition:
background-color .12s ease,
border-color .12s ease,
box-shadow .12s ease,
transform .06s ease;
cursor: pointer;
}
input[type="checkbox"]:hover {
background: #121820;
border-color: var(--accent-2);
box-shadow: 0 0 0 3px rgba(96,165,250,.18);
}
input[type="checkbox"]:active {
transform: scale(0.96);
}
input[type="checkbox"]:checked {
border-color: var(--accent-2);
background-color: #0f141a;
background-image: url("data:image/svg+xml;utf8,&lt;svg xmlns='http://www.w3.org/2000/svg' width='14' height='14' viewBox='0 0 24 24' fill='none' stroke='%2360a5fa' stroke-width='3' stroke-linecap='round' stroke-linejoin='round'&gt;&lt;polyline points='20 6 9 17 4 12'/&gt;&lt;/svg&gt;");
background-repeat: no-repeat;
background-position: center;
background-size: 12px 12px;
}
input[type="checkbox"]:focus-visible {
outline: none;
border-color: var(--accent-2);
box-shadow: var(--focus-ring3-22);
}
input[type="checkbox"]:disabled {
opacity: .6;
cursor: not-allowed;
box-shadow: none;
}
/* --- Enhanced checkbox visual: add glowing blue dot at center --- */
/* Applied to every checkbox in the UI (inspector, "Запуск" (Run) panel, Prompt Blocks, STORE, etc.).
In canvas node previews, checkboxes are hidden by the rule added earlier. */
input[type="checkbox"] {
position: relative; /* to center the pseudo-element */
overflow: visible; /* safe for the glow */
}
input[type="checkbox"]::after {
content: "";
position: absolute;
left: 50%;
top: 50%;
width: 6px;
height: 6px;
border-radius: 999px;
background: var(--accent-2);
transform: translate(-50%, -50%) scale(0.6);
opacity: .6;
/* soft blue glow at rest */
box-shadow:
0 0 4px rgba(96,165,250,.45),
0 0 10px rgba(96,165,250,.25);
transition:
transform .12s ease,
opacity .12s ease,
box-shadow .12s ease;
}
input[type="checkbox"]:checked::after {
transform: translate(-50%, -50%) scale(1.0);
opacity: 1;
/* stronger glow when checked */
box-shadow:
0 0 6px rgba(96,165,250,.80),
0 0 14px rgba(96,165,250,.60),
0 0 24px rgba(96,165,250,.35);
}
input[type="checkbox"]:disabled::after {
opacity: .35;
box-shadow: 0 0 2px rgba(96,165,250,.25);
}
/* --- Unified number input style across UI --- */
/* Unified style for every input[type=number], including the inspector, "Запуск" (Run) panel, SERVICE panels, etc. */
input[type="number"] {
width: 100%;
background: #0f141a;
color: #e5e7eb;
border: 1px solid #2b3646;
border-radius: 8px;
padding: 6px 8px;
height: 32px;
box-sizing: border-box;
font: 12px/1 Inter, system-ui, Arial, sans-serif;
transition: var(--tr-base);
}
input[type="number"]:hover {
background: #121820;
border-color: var(--accent-2);
}
input[type="number"]:focus {
outline: none;
border-color: var(--accent-2);
box-shadow: var(--focus-ring3-20);
}
input[type="number"]:disabled {
opacity: .6;
cursor: not-allowed;
box-shadow: none;
}
/* Remove the native spinner arrows so the style is identical across browsers */
input[type="number"]::-webkit-outer-spin-button,
input[type="number"]::-webkit-inner-spin-button {
-webkit-appearance: none;
margin: 0;
}
input[type="number"] {
-moz-appearance: textfield;
}
/* --- Canvas preview sanitization (reminder): hints/labels/checkboxes are hidden in previews --- */
/* Summary sections (headers/template) stay visible */

File diff suppressed because it is too large

View File

@@ -4,6 +4,12 @@
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<title>НадTavern</title>
<link rel="icon" href="/favicon.ico" />
<link rel="icon" type="image/png" sizes="32x32" href="/favicon-32x32.png" />
<link rel="icon" type="image/png" sizes="16x16" href="/favicon-16x16.png" />
<link rel="apple-touch-icon" href="/apple-touch-icon.png" />
<link rel="manifest" href="/site.webmanifest" />
<meta name="theme-color" content="#ffffff" />
<style>
body { font-family: Arial, sans-serif; margin: 24px; }
textarea { width: 100%; height: 200px; }

View File

@@ -21,10 +21,15 @@
// Prepare the new data with a deep copy of blocks
const newData = { ...(n.data || {}), blocks: Array.isArray(d2.blocks) ? d2.blocks.map(b => ({ ...b })) : [] };
// 1) Update Drawflow's internal state so export() returns fresh data
try { editor.updateNodeDataFromId(id, newData); } catch (e) {}
// 2) Update the DOM mirror (the source of truth for toPipelineJSON)
const el2 = document.querySelector(`#node-${id}`);
if (el2) el2.__data = JSON.parse(JSON.stringify(newData));
try {
if (w.AU && typeof w.AU.updateNodeDataAndDom === 'function') {
w.AU.updateNodeDataAndDom(editor, id, newData);
} else {
editor.updateNodeDataFromId(id, newData);
const el2 = document.querySelector(`#node-${id}`);
if (el2) el2.__data = JSON.parse(JSON.stringify(newData));
}
} catch (e) {}
} catch (e) {}
}
// Initial sync to attach blocks into __data for toPipelineJSON

View File

@@ -0,0 +1,158 @@
/* global window */
(function (w) {
'use strict';
// Centralized registry for provider-specific defaults (base_url, endpoint, headers, template)
// Exposes window.ProviderTemplates with:
// .register(name, { defaultConfig: () => ({ base_url, endpoint, headers, template }) })
// .defaults(provider)
// .ensureConfigs(nodeData)
// .getActiveProv(nodeData)
// .getActiveCfg(nodeData)
// .providers()
const PT = {};
const _registry = new Map();
function norm(p) {
return String(p == null ? 'openai' : p).toLowerCase().trim();
}
PT.register = function register(name, def) {
const key = norm(name);
if (!def || typeof def.defaultConfig !== 'function') {
throw new Error('ProviderTemplates.register: def.defaultConfig() required');
}
_registry.set(key, { defaultConfig: def.defaultConfig });
};
PT.providers = function providers() {
return Array.from(_registry.keys());
};
PT.defaults = function defaults(provider) {
const key = norm(provider);
const rec = _registry.get(key);
if (rec && typeof rec.defaultConfig === 'function') {
try { return rec.defaultConfig(); } catch (_) {}
}
return { base_url: '', endpoint: '', headers: `{}`, template: `{}` };
};
PT.ensureConfigs = function ensureConfigs(d) {
if (!d) return;
if (!d.provider) d.provider = 'openai';
if (!d.provider_configs || typeof d.provider_configs !== 'object') d.provider_configs = {};
for (const p of PT.providers()) {
if (!d.provider_configs[p]) d.provider_configs[p] = PT.defaults(p);
}
};
PT.getActiveProv = function getActiveProv(d) {
return norm(d && d.provider);
};
PT.getActiveCfg = function getActiveCfg(d) {
PT.ensureConfigs(d);
const p = PT.getActiveProv(d);
return d && d.provider_configs ? (d.provider_configs[p] || {}) : {};
};
// --- Built-in providers (default presets) ---
// Templates mirror original editor.html logic; use macros [[...]] and {{ ... }} as-is.
function T_OPENAI() { return `{
"model": "{{ model }}",
[[PROMPT]],
"temperature": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},
"top_p": {{ incoming.json.top_p|default(params.top_p|default(1)) }},
"max_tokens": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},
"max_completion_tokens": {{ incoming.json.max_completion_tokens|default(params.max_tokens|default(256)) }},
"presence_penalty": {{ incoming.json.presence_penalty|default(0) }},
"frequency_penalty": {{ incoming.json.frequency_penalty|default(0) }},
"stop": {{ incoming.json.stop|default(params.stop|default([])) }},
"stream": {{ incoming.json.stream|default(false) }}
}`; }
function T_GEMINI() { return `{
"model": "{{ model }}",
[[PROMPT]],
"safetySettings": {{ incoming.json.safetySettings|default([]) }},
"generationConfig": {
"temperature": {{ incoming.json.generationConfig.temperature|default(params.temperature|default(0.7)) }},
"topP": {{ incoming.json.generationConfig.topP|default(params.top_p|default(1)) }},
"maxOutputTokens": {{ incoming.json.generationConfig.maxOutputTokens|default(params.max_tokens|default(256)) }},
"stopSequences": {{ incoming.json.generationConfig.stopSequences|default(params.stop|default([])) }},
"candidateCount": {{ incoming.json.generationConfig.candidateCount|default(1) }},
"thinkingConfig": {
"includeThoughts": {{ incoming.json.generationConfig.thinkingConfig.includeThoughts|default(false) }},
"thinkingBudget": {{ incoming.json.generationConfig.thinkingConfig.thinkingBudget|default(0) }}
}
}
}`; }
function T_GEMINI_IMAGE() { return `{
"model": "{{ model }}",
[[PROMPT]]
}`; }
function T_CLAUDE() { return `{
"model": "{{ model }}",
[[PROMPT]],
"temperature": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},
"top_p": {{ incoming.json.top_p|default(params.top_p|default(1)) }},
"max_tokens": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},
"stop_sequences": {{ incoming.json.stop_sequences|default(params.stop|default([])) }},
"stream": {{ incoming.json.stream|default(false) }},
"thinking": {
"type": "{{ incoming.json.thinking.type|default('disabled') }}",
"budget_tokens": {{ incoming.json.thinking.budget_tokens|default(0) }}
},
"anthropic_version": "{{ anthropic_version|default('2023-06-01') }}"
}`; }
// Register built-ins
PT.register('openai', {
defaultConfig: () => ({
base_url: 'https://api.openai.com',
endpoint: '/v1/chat/completions',
headers: `{"Authorization":"Bearer [[VAR:incoming.headers.authorization]]"}`,
template: T_OPENAI()
})
});
PT.register('gemini', {
defaultConfig: () => ({
base_url: 'https://generativelanguage.googleapis.com',
endpoint: '/v1beta/models/{{ model }}:generateContent?key=[[VAR:incoming.api_keys.key]]',
headers: `{}`,
template: T_GEMINI()
})
});
PT.register('gemini_image', {
defaultConfig: () => ({
base_url: 'https://generativelanguage.googleapis.com',
endpoint: '/v1beta/models/{{ model }}:generateContent',
headers: `{"x-goog-api-key":"[[VAR:incoming.api_keys.key]]"}`,
template: T_GEMINI_IMAGE()
})
});
PT.register('claude', {
defaultConfig: () => ({
base_url: 'https://api.anthropic.com',
endpoint: '/v1/messages',
headers: `{"x-api-key":"[[VAR:incoming.headers.x-api-key]]","anthropic-version":"2023-06-01","anthropic-beta":"[[VAR:incoming.headers.anthropic-beta]]"}`,
template: T_CLAUDE()
})
});
try { console.debug('[ProviderTemplates] providers:', PT.providers()); } catch (_) {}
// Export globals and compatibility shims
try {
w.ProviderTemplates = PT;
// Back-compat shims so existing code can call global helpers
w.providerDefaults = PT.defaults;
w.ensureProviderConfigs = PT.ensureConfigs;
w.getActiveProv = PT.getActiveProv;
w.getActiveCfg = PT.getActiveCfg;
} catch (_) {}
})(window);
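// Usage sketch (not part of the module above; 'mistral' and its endpoint are made-up
// placeholders, only the ProviderTemplates calls mirror the registry API defined here):
//   ProviderTemplates.register('mistral', {
//     defaultConfig: () => ({
//       base_url: 'https://api.mistral.ai',
//       endpoint: '/v1/chat/completions',
//       headers: `{"Authorization":"Bearer [[VAR:incoming.headers.authorization]]"}`,
//       template: `{ "model": "{{ model }}", [[PROMPT]] }`
//     })
//   });
//   const d = { provider: 'mistral' };
//   ProviderTemplates.ensureConfigs(d);            // fills defaults for every registered provider
//   const cfg = ProviderTemplates.getActiveCfg(d); // -> the 'mistral' config registered above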

View File

@@ -12,7 +12,8 @@
// Top-level pipeline meta kept in memory and included into JSON on save.
// Allows UI to edit loop parameters without manual JSON edits.
let _pipelineMeta = {
// DRY: single source of meta defaults and normalization
const MetaDefaults = Object.freeze({
id: 'pipeline_editor',
name: 'Edited Pipeline',
parallel_limit: 8,
@@ -20,19 +21,74 @@
loop_max_iters: 1000,
loop_time_budget_ms: 10000,
clear_var_store: true,
// New: default HTTP timeout for upstream requests (seconds)
http_timeout_sec: 60,
// New (v1): text extraction strategy for [[OUTx]] (global default)
// auto | deep | openai | gemini | claude | jsonpath
text_extract_strategy: 'auto',
// Used with the jsonpath strategy (dot notation, index support: a.b.0.c)
text_extract_json_path: '',
// Separator used when joining an array of results
text_join_sep: '\n',
// v2: collection of text-extraction presets, managed in the "Запуск" (Run) panel
// [{ id, name, strategy, json_path, join_sep }]
text_extract_presets: [],
};
});
let _pipelineMeta = { ...MetaDefaults };
// Meta normalizer: coerces types, supports key synonyms, fills in defaults
function ensureMeta(p) {
const src = (p && typeof p === 'object') ? p : {};
const out = { ...MetaDefaults };
// helpers
const toInt = (v, def) => {
try {
const n = parseInt(v, 10);
return Number.isFinite(n) && n > 0 ? n : def;
} catch { return def; }
};
const toNum = (v, def) => {
try {
const n = parseFloat(v);
return !Number.isNaN(n) && n > 0 ? n : def;
} catch { return def; }
};
// base fields
try { out.id = String((src.id ?? out.id) || out.id); } catch {}
try { out.name = String((src.name ?? out.name) || out.name); } catch {}
out.parallel_limit = toInt(src.parallel_limit, out.parallel_limit);
out.loop_mode = String((src.loop_mode ?? out.loop_mode) || out.loop_mode);
out.loop_max_iters = toInt(src.loop_max_iters, out.loop_max_iters);
out.loop_time_budget_ms = toInt(src.loop_time_budget_ms, out.loop_time_budget_ms);
out.clear_var_store = (typeof src.clear_var_store === 'boolean') ? !!src.clear_var_store : out.clear_var_store;
out.http_timeout_sec = toNum(src.http_timeout_sec, out.http_timeout_sec);
out.text_extract_strategy = String((src.text_extract_strategy ?? out.text_extract_strategy) || out.text_extract_strategy);
out.text_extract_json_path = String((src.text_extract_json_path ?? out.text_extract_json_path) || out.text_extract_json_path);
// support text_join_sep synonyms (case and spelling variations)
let joinSep = out.text_join_sep;
try {
for (const k of Object.keys(src)) {
if (String(k).toLowerCase() === 'text_join_sep') { joinSep = src[k]; break; }
}
} catch {}
out.text_join_sep = String((joinSep ?? src.text_join_sep ?? out.text_join_sep) || out.text_join_sep);
// presets collection
try {
const arr = Array.isArray(src.text_extract_presets) ? src.text_extract_presets : [];
out.text_extract_presets = arr
.filter(it => it && typeof it === 'object')
.map((it, idx) => ({
id: String((it.id ?? '') || ('p' + Date.now().toString(36) + Math.random().toString(36).slice(2) + idx)),
name: String(it.name ?? (it.json_path || 'Preset')),
strategy: String(it.strategy ?? 'auto'),
json_path: String(it.json_path ?? ''),
join_sep: String(it.join_sep ?? '\n'),
}));
} catch { out.text_extract_presets = []; }
return out;
}
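// Example (illustrative): ensureMeta coerces types, honors key synonyms and falls back to defaults:
//   ensureMeta({ parallel_limit: '4', text_join_SEP: ' | ', loop_max_iters: -1 })
//   // -> { ...MetaDefaults, parallel_limit: 4, text_join_sep: ' | ', loop_max_iters: 1000 }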
function getPipelineMeta() {
return { ..._pipelineMeta };
@@ -40,48 +96,8 @@
function updatePipelineMeta(p) {
if (!p || typeof p !== 'object') return;
const keys = [
'id','name','parallel_limit','loop_mode','loop_max_iters','loop_time_budget_ms','clear_var_store','http_timeout_sec',
'text_extract_strategy','text_extract_json_path','text_join_sep','text_join_sep','text_join_SEP',
// v2 presets collection
'text_extract_presets'
];
for (const k of keys) {
if (Object.prototype.hasOwnProperty.call(p, k) && p[k] !== undefined && p[k] !== null && (k === 'clear_var_store' ? true : p[k] !== '')) {
if (k === 'parallel_limit' || k === 'loop_max_iters' || k === 'loop_time_budget_ms') {
const v = parseInt(p[k], 10);
if (!Number.isNaN(v) && v > 0) _pipelineMeta[k] = v;
} else if (k === 'http_timeout_sec') {
const fv = parseFloat(p[k]);
if (!Number.isNaN(fv) && fv > 0) _pipelineMeta[k] = fv;
} else if (k === 'clear_var_store') {
_pipelineMeta[k] = !!p[k];
} else {
// special handling for the presets collection
if (k === 'text_extract_presets') {
try {
const arr = Array.isArray(p[k]) ? p[k] : [];
_pipelineMeta[k] = arr
.filter(it => it && typeof it === 'object')
.map(it => ({
id: String((it.id ?? '') || ('p' + Date.now().toString(36) + Math.random().toString(36).slice(2))),
name: String(it.name ?? 'Preset'),
strategy: String(it.strategy ?? 'auto'),
json_path: String(it.json_path ?? ''),
join_sep: String(it.join_sep ?? '\n'),
}));
} catch (_) {
_pipelineMeta[k] = [];
}
} else if (k.toLowerCase() === 'text_join_sep') {
// normalize the join separator key (different spellings are accepted)
_pipelineMeta['text_join_sep'] = String(p[k]);
} else {
_pipelineMeta[k] = String(p[k]);
}
}
}
}
// DRY: single normalization point
_pipelineMeta = ensureMeta({ ..._pipelineMeta, ...p });
}
// Drawflow -> pipeline JSON
@@ -260,24 +276,10 @@
}
}
// 3) Assemble the final pipeline JSON with metadata
const meta = getPipelineMeta();
return {
id: meta.id || 'pipeline_editor',
name: meta.name || 'Edited Pipeline',
parallel_limit: (typeof meta.parallel_limit === 'number' ? meta.parallel_limit : 8),
loop_mode: (meta.loop_mode || 'dag'),
loop_max_iters: (typeof meta.loop_max_iters === 'number' ? meta.loop_max_iters : 1000),
loop_time_budget_ms: (typeof meta.loop_time_budget_ms === 'number' ? meta.loop_time_budget_ms : 10000),
clear_var_store: (typeof meta.clear_var_store === 'boolean' ? meta.clear_var_store : true),
http_timeout_sec: (typeof meta.http_timeout_sec === 'number' ? meta.http_timeout_sec : 60),
text_extract_strategy: (meta.text_extract_strategy || 'auto'),
text_extract_json_path: (meta.text_extract_json_path || ''),
text_join_sep: (meta.text_join_sep || '\n'),
// v2: persist presets
text_extract_presets: (Array.isArray(meta.text_extract_presets) ? meta.text_extract_presets : []),
nodes
};
// 3) Assemble the final pipeline JSON with (normalized) metadata
const meta = ensureMeta(getPipelineMeta());
try { console.debug('[AgentUISer.toPipelineJSON] meta_keys', Object.keys(meta || {})); } catch (e) {}
return { ...meta, nodes };
}
// pipeline JSON -> Drawflow
@@ -285,25 +287,25 @@
ensureDeps();
const editor = w.editor;
const NODE_IO = w.NODE_IO;
// Persist the pipeline metadata for the UI
try {
updatePipelineMeta({
id: p && p.id ? p.id : 'pipeline_editor',
name: p && p.name ? p.name : 'Edited Pipeline',
parallel_limit: (p && typeof p.parallel_limit === 'number') ? p.parallel_limit : 8,
loop_mode: p && p.loop_mode ? p.loop_mode : 'dag',
loop_max_iters: (p && typeof p.loop_max_iters === 'number') ? p.loop_max_iters : 1000,
loop_time_budget_ms: (p && typeof p.loop_time_budget_ms === 'number') ? p.loop_time_budget_ms : 10000,
clear_var_store: (p && typeof p.clear_var_store === 'boolean') ? p.clear_var_store : true,
http_timeout_sec: (p && typeof p.http_timeout_sec === 'number') ? p.http_timeout_sec : 60,
text_extract_strategy: (p && typeof p.text_extract_strategy === 'string') ? p.text_extract_strategy : 'auto',
text_extract_json_path: (p && typeof p.text_extract_json_path === 'string') ? p.text_extract_json_path : '',
text_join_sep: (p && typeof p.text_join_sep === 'string') ? p.text_join_sep : '\n',
// v2: presets from pipeline.json
text_extract_presets: (p && Array.isArray(p.text_extract_presets)) ? p.text_extract_presets : [],
});
} catch (e) {}
// Persist the pipeline metadata for the UI (normalized end to end)
try {
updatePipelineMeta(p || {});
// Diagnostic log of the meta contents to confirm the DRY refactor
try {
const metaKeys = ["id","name","parallel_limit","loop_mode","loop_max_iters","loop_time_budget_ms","clear_var_store","http_timeout_sec","text_extract_strategy","text_extract_json_path","text_join_sep","text_extract_presets"];
const incomingKeys = metaKeys.filter(k => (p && Object.prototype.hasOwnProperty.call(p, k)));
const currentMeta = (typeof getPipelineMeta === 'function') ? getPipelineMeta() : {};
console.debug('[AgentUISer.fromPipelineJSON] meta_keys', {
incomingKeys,
resultKeys: Object.keys(currentMeta || {}),
metaPreview: {
id: currentMeta && currentMeta.id,
loop_mode: currentMeta && currentMeta.loop_mode,
http_timeout_sec: currentMeta && currentMeta.http_timeout_sec
}
});
} catch (_) {}
} catch (e) {}
editor.clear();
let x = 100; let y = 120; // Fallback

213
static/js/utils.js Normal file
View File

@@ -0,0 +1,213 @@
/* global window */
// AgentUI common UI utilities (DRY helpers shared by editor.html and pm-ui.js)
(function (w) {
'use strict';
const AU = {};
// HTML escaping for safe text/attribute insertion
AU.escapeHtml = function escapeHtml(s) {
const str = String(s ?? '');
return str
.replace(/&/g, '&amp;')
.replace(/</g, '&lt;')
.replace(/>/g, '&gt;')
.replace(/"/g, '&quot;')
.replace(/'/g, '&#39;');
};
// Attribute-safe escape (keeps quotes escaped; conservative)
AU.escAttr = function escAttr(v) {
const s = String(v ?? '');
return s
.replace(/&/g, '&amp;')
.replace(/</g, '&lt;')
.replace(/>/g, '&gt;')
.replace(/"/g, '&quot;')
.replace(/'/g, '&#39;');
};
// Text-node escape (keeps quotes as-is for readability)
AU.escText = function escText(v) {
const s = String(v ?? '');
return s
.replace(/&/g, '&amp;')
.replace(/</g, '&lt;')
.replace(/>/g, '&gt;');
};
// DRY helper: sync Drawflow node data + mirror into DOM.__data with deep copy
AU.updateNodeDataAndDom = function updateNodeDataAndDom(editor, id, data) {
try { editor && typeof editor.updateNodeDataFromId === 'function' && editor.updateNodeDataFromId(id, data); } catch (_) {}
try {
const el = document.querySelector('#node-' + id);
if (el) el.__data = JSON.parse(JSON.stringify(data));
} catch (_) {}
};
// Double rAF helper: waits for two animation frames; returns Promise or accepts callback
AU.nextRaf2 = function nextRaf2(cb) {
try {
if (typeof requestAnimationFrame === 'function') {
if (typeof cb === 'function') {
requestAnimationFrame(() => { requestAnimationFrame(() => { try { cb(); } catch (_) {} }); });
return;
}
return new Promise((resolve) => requestAnimationFrame(() => requestAnimationFrame(() => resolve())));
} else {
if (typeof cb === 'function') { setTimeout(() => { try { cb(); } catch (_) {} }, 32); return; }
return new Promise((resolve) => setTimeout(resolve, 32));
}
} catch (_) {
if (typeof cb === 'function') { try { cb(); } catch (__) {} }
return Promise.resolve();
}
};
// Heuristic: looks like long base64 payload
AU.isProbablyBase64 = function isProbablyBase64(s) {
try {
if (typeof s !== 'string') return false;
if (s.length < 64) return false;
return /^[A-Za-z0-9+/=\r\n]+$/.test(s);
} catch { return false; }
};
AU.trimBase64 = function trimBase64(s, maxLen = 180) {
try {
const str = String(s ?? '');
if (str.length > maxLen) {
return str.slice(0, maxLen) + `... (trimmed ${str.length - maxLen})`;
}
return str;
} catch { return String(s ?? ''); }
};
// Flatten JSON-like object into [path, stringValue] pairs
// Includes special handling for backend preview objects: { "__truncated__": true, "preview": "..." }
AU.flattenObject = function flattenObject(obj, prefix = '') {
const out = [];
if (obj == null) return out;
if (typeof obj !== 'object') {
out.push([prefix, String(obj)]);
return out;
}
try {
const entries = Object.entries(obj);
for (const [k, v] of entries) {
const p = prefix ? `${prefix}.${k}` : k;
if (v && typeof v === 'object' && !Array.isArray(v)) {
// Special preview shape from backend
if (Object.prototype.hasOwnProperty.call(v, '__truncated__') && Object.prototype.hasOwnProperty.call(v, 'preview')) {
out.push([p, String(v.preview ?? '')]);
continue;
}
out.push(...AU.flattenObject(v, p));
} else {
try {
const s = (typeof v === 'string') ? v : JSON.stringify(v, null, 0);
out.push([p, s]);
} catch {
out.push([p, String(v)]);
}
}
}
} catch {
// Fallback best-effort
try { out.push([prefix, JSON.stringify(obj)]); } catch { out.push([prefix, String(obj)]); }
}
return out;
};
// Format headers dictionary into text lines "Key: Value"
AU.fmtHeaders = function fmtHeaders(h) {
try {
const keys = Object.keys(h || {});
return keys.map(k => `${k}: ${String(h[k])}`).join('\n');
} catch { return ''; }
};
// Build HTTP request preview text
AU.buildReqText = function buildReqText(x) {
if (!x) return '';
const head = `${x.method || 'POST'} ${x.url || '/'} HTTP/1.1`;
const host = (() => {
try { const u = new URL(x.url); return `Host: ${u.host}`; } catch { return ''; }
})();
const hs = AU.fmtHeaders(x.headers || {});
const body = String(x.body_text || '').trim();
return [head, host, hs, '', body].filter(Boolean).join('\n');
};
// Build HTTP response preview text
AU.buildRespText = function buildRespText(x) {
if (!x) return '';
const head = `HTTP/1.1 ${x.status || 0}`;
const hs = AU.fmtHeaders(x.headers || {});
const body = String(x.body_text || '').trim();
return [head, hs, '', body].filter(Boolean).join('\n');
};
// Unified fetch helper with timeout and JSON handling
AU.apiFetch = async function apiFetch(url, opts) {
const t0 = (typeof performance !== 'undefined' && performance.now) ? performance.now() : Date.now();
const o = opts || {};
const method = String(o.method || 'GET').toUpperCase();
const expectJson = (o.expectJson !== false); // default true
const headers = Object.assign({}, o.headers || {});
let body = o.body;
const timeoutMs = Number.isFinite(o.timeoutMs) ? o.timeoutMs : 15000;
const hasAbort = (typeof AbortController !== 'undefined');
const ctrl = hasAbort ? new AbortController() : null;
let to = null;
if (ctrl) {
try { to = setTimeout(() => { try { ctrl.abort(); } catch(_){} }, timeoutMs); } catch(_) {}
}
try {
if (expectJson) {
if (!headers['Accept'] && !headers['accept']) headers['Accept'] = 'application/json';
}
if (body != null) {
const isForm = (typeof FormData !== 'undefined' && body instanceof FormData);
const isBlob = (typeof Blob !== 'undefined' && body instanceof Blob);
if (typeof body === 'object' && !isForm && !isBlob) {
body = JSON.stringify(body);
if (!headers['Content-Type'] && !headers['content-type']) headers['Content-Type'] = 'application/json';
}
}
const res = await fetch(url, { method, headers, body, signal: ctrl ? ctrl.signal : undefined });
const ct = String(res.headers && res.headers.get ? (res.headers.get('Content-Type') || '') : '');
const isJsonCt = /application\/json/i.test(ct);
let data = null;
if (expectJson || isJsonCt) {
try { data = await res.json(); } catch (_) { data = null; }
} else {
try { data = await res.text(); } catch (_) { data = null; }
}
const t1 = (typeof performance !== 'undefined' && performance.now) ? performance.now() : Date.now();
try { console.debug('[AU.apiFetch]', { method, url, status: res.status, ms: Math.round(t1 - t0) }); } catch(_) {}
if (!res.ok) {
const msg = (data && typeof data === 'object' && data.error) ? String(data.error) : `HTTP ${res.status}`;
const err = new Error(`apiFetch: ${msg}`);
err.status = res.status;
err.data = data;
err.url = url;
throw err;
}
return data;
} finally {
if (to) { try { clearTimeout(to); } catch(_) {} }
}
};
// Expose
try { w.AU = AU; } catch (_) {}
try { w.nextRaf2 = AU.nextRaf2; } catch (_) {}
})(window);

View File

@@ -4,6 +4,12 @@
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<title>НадTavern — Pipeline Editor (JSON)</title>
<link rel="icon" href="/favicon.ico" />
<link rel="icon" type="image/png" sizes="32x32" href="/favicon-32x32.png" />
<link rel="icon" type="image/png" sizes="16x16" href="/favicon-16x16.png" />
<link rel="apple-touch-icon" href="/apple-touch-icon.png" />
<link rel="manifest" href="/site.webmanifest" />
<meta name="theme-color" content="#ffffff" />
<style>
body { font-family: Arial, sans-serif; margin: 24px; }
textarea { width: 100%; height: 70vh; }

1
tests/__init__.py Normal file
View File

@@ -0,0 +1 @@
# Make tests a package so imports like "from tests.utils import ..." work.

View File

@@ -1,25 +1,9 @@
import asyncio
import json
from agentui.pipeline.executor import PipelineExecutor, ExecutionError, Node, NODE_REGISTRY
from tests.utils import pp as _pp, base_ctx as _base_ctx
# Helper to pretty print short JSON safely
def _pp(obj, max_len=800):
try:
s = json.dumps(obj, ensure_ascii=False, indent=2)
except Exception:
s = str(obj)
if len(s) > max_len:
return s[:max_len] + "...<truncated>"
return s
def _base_ctx(vendor="openai"):
return {
"model": "gpt-x",
"vendor_format": vendor,
"params": {"temperature": 0.1},
"chat": {"last_user": "hi"},
"OUT": {},
}
async def scenario_if_single_quotes_ok():
print("\n=== SCENARIO 1: If with single quotes ===")

View File

@@ -1,33 +1,8 @@
import asyncio
import json
from agentui.pipeline.executor import PipelineExecutor
from agentui.pipeline.storage import clear_var_store
from tests.utils import pp as _pp, ctx as _ctx
def _pp(obj, max_len=800):
try:
s = json.dumps(obj, ensure_ascii=False, indent=2)
except Exception:
s = str(obj)
if len(s) > max_len:
return s[:max_len] + "...<truncated>"
return s
def _ctx(vendor="openai", incoming=None, params=None):
return {
"model": "gpt-x",
"vendor_format": vendor,
"params": params or {"temperature": 0.25},
"chat": {"last_user": "Привет"},
"OUT": {},
"incoming": incoming or {
"method": "POST",
"url": "http://localhost/test",
"path": "/test",
"query": "",
"headers": {"x": "X-HEADER"},
"json": {},
},
}
async def scenario_bare_vars_and_braces():
print("\n=== MACROS 1: Bare [[NAME]] и {{ NAME }} + числа/объекты без кавычек ===")
@@ -63,6 +38,7 @@ async def scenario_bare_vars_and_braces():
out = await PipelineExecutor(p).run(_ctx())
print("OUT:", _pp(out))
async def scenario_var_path_and_defaults():
print("\n=== MACROS 2: [[VAR:path]] и {{ ...|default(...) }} (вложенные и JSON-литералы) ===")
incoming = {
@@ -101,6 +77,7 @@ async def scenario_var_path_and_defaults():
out = await PipelineExecutor(p).run(_ctx(incoming=incoming, params={"temperature": 0.2}))
print("OUT:", _pp(out))
async def scenario_out_macros_full_and_short():
print("\n=== MACROS 3: [[OUT:nX...]] и короткая форма [[OUTx]] ===")
p = {
@@ -142,6 +119,7 @@ async def scenario_out_macros_full_and_short():
out = await PipelineExecutor(p).run(_ctx())
print("OUT:", _pp(out))
async def scenario_store_macros_two_runs():
print("\n=== MACROS 4: [[STORE:key]] и {{ STORE.key }} между запусками (clear_var_store=False) ===")
pid = "p_macros_4_store"
@@ -198,6 +176,7 @@ async def scenario_store_macros_two_runs():
out2 = await PipelineExecutor(p2).run(_ctx())
print("RUN2:", _pp(out2))
async def scenario_pm_prompt_blocks_to_provider_structs():
print("\n=== MACROS 5: Prompt Blocks ([[PROMPT]]) → provider-structures (OpenAI) ===")
# Проверяем, что [[PROMPT]] со списком блоков превращается в "messages":[...]
@@ -232,6 +211,7 @@ async def scenario_pm_prompt_blocks_to_provider_structs():
out = await PipelineExecutor(p).run(_ctx())
print("OUT:", _pp(out))
def run_all():
async def main():
await scenario_bare_vars_and_braces()
@@ -242,5 +222,6 @@ def run_all():
print("\n=== MACROS VARS SUITE: DONE ===")
asyncio.run(main())
if __name__ == "__main__":
run_all()

View File

@@ -0,0 +1,249 @@
import asyncio
import json
from typing import Any, Dict, List
from agentui.pipeline.executor import PipelineExecutor
import agentui.providers.http_client as hc
from tests.utils import ctx as _ctx, pp as _pp
# Capture of all outbound ProviderCall HTTP requests (one per run)
CAPTURED: List[Dict[str, Any]] = []
class DummyResponse:
def __init__(self, status_code: int = 200, body: Dict[str, Any] | None = None):
self.status_code = status_code
self._json = body if body is not None else {"ok": True}
self.headers = {}
try:
self.content = json.dumps(self._json, ensure_ascii=False).encode("utf-8")
except Exception:
self.content = b"{}"
try:
self.text = json.dumps(self._json, ensure_ascii=False)
except Exception:
self.text = "{}"
def json(self) -> Any:
return self._json
class DummyClient:
def __init__(self, capture: List[Dict[str, Any]], status_code: int = 200):
self._capture = capture
self._status = status_code
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
return False
async def post(self, url: str, content: bytes, headers: Dict[str, str]):
try:
payload = json.loads(content.decode("utf-8"))
except Exception:
payload = {"_raw": content.decode("utf-8", errors="ignore")}
rec = {"url": url, "headers": headers, "payload": payload}
self._capture.append(rec)
# Echo the payload back to keep the extractor happy without tying it to vendor formats
return DummyResponse(self._status, {"echo": rec})
# RawForward may use .request, but we don't need it here
async def request(self, method: str, url: str, headers: Dict[str, str], content: bytes | None):
return await self.post(url, content or b"{}", headers)
def _patch_http_client():
"""Monkeypatch build_client used by ProviderCall to our dummy."""
hc.build_client = lambda timeout=60.0: DummyClient(CAPTURED, 200) # type: ignore[assignment]
# Also patch the symbol imported into executor so that ProviderCall uses the DummyClient
import agentui.pipeline.executor as ex # type: ignore
ex.build_client = lambda timeout=60.0: DummyClient(CAPTURED, 200) # type: ignore
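# For orientation: a rough sketch of one captured record after a single OpenAI-target
# run (illustrative values; exact headers depend on the node config under test):
#
#   CAPTURED[-1] == {
#       "url": "http://mock.local/v1/chat/completions",
#       "headers": {...},
#       "payload": {"model": "gpt-x", "messages": [...]},
#   }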
def _mk_pipeline(provider: str, prompt_combine: str) -> Dict[str, Any]:
"""Build a minimal ProviderCall-only pipeline for a given provider and combine spec."""
provider = provider.lower().strip()
if provider not in {"openai", "gemini", "claude"}:
raise AssertionError(f"Unsupported provider in test: {provider}")
base_url = "http://mock.local"
if provider == "openai":
endpoint = "/v1/chat/completions"
template = '{ "model": "{{ model }}", [[PROMPT]] }'
elif provider == "gemini":
endpoint = "/v1beta/models/{{ model }}:generateContent"
template = '{ "model": "{{ model }}", [[PROMPT]] }'
else: # claude
endpoint = "/v1/messages"
template = '{ "model": "{{ model }}", [[PROMPT]] }'
p = {
"id": f"p_prompt_combine_{provider}",
"name": f"prompt_combine to {provider}",
"loop_mode": "dag",
"nodes": [
{
"id": "n1",
"type": "ProviderCall",
"config": {
"provider": provider,
"provider_configs": {
provider: {
"base_url": base_url,
"endpoint": endpoint,
"headers": "{}",
"template": template,
}
},
# Key under test:
"prompt_combine": prompt_combine,
# Prompt Blocks (PROMPT)
"blocks": [
{"id": "b1", "name": "sys", "role": "system", "prompt": "Ты — Narrator-chan.", "enabled": True, "order": 0},
{"id": "b2", "name": "user", "role": "user", "prompt": "как лела", "enabled": True, "order": 1},
],
},
"in": {},
}
],
}
return p
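# Usage sketch: the scenarios below build their pipelines like this; the combine spec
# is the string under test ("&" merges the two message sources, per the assertions):
#
#   p = _mk_pipeline("openai", "[[VAR:incoming.json.contents]] & [[PROMPT]]")
#   # -> single ProviderCall node posting to http://mock.local/v1/chat/completions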
def _ctx_with_incoming(incoming_json: Dict[str, Any], vendor: str = "openai") -> Dict[str, Any]:
base = _ctx(vendor=vendor)
inc = dict(base["incoming"])
inc["json"] = incoming_json
base["incoming"] = inc
return base
async def scenario_openai_target_from_gemini_contents():
print("\n=== PROMPT_COMBINE 1: target=openai, incoming=gemini.contents & PROMPT ===")
_patch_http_client()
CAPTURED.clear()
# Incoming JSON in Gemini shape
incoming_json = {
"contents": [
{"role": "user", "parts": [{"text": "Прив"}]},
{"role": "model", "parts": [{"text": "И тебе привет!"}]},
]
}
p = _mk_pipeline("openai", "[[VAR:incoming.json.contents]] & [[PROMPT]]")
out = await PipelineExecutor(p).run(_ctx_with_incoming(incoming_json, vendor="gemini"))
print("PIPE OUT:", _pp(out))
assert CAPTURED, "No HTTP request captured"
req = CAPTURED[-1]
payload = req["payload"]
# Validate OpenAI body
assert "messages" in payload, "OpenAI payload must contain messages"
msgs = payload["messages"]
# Expected: 2 (converted Gemini) + 2 (PROMPT blocks system+user) = 4
assert isinstance(msgs, list) and len(msgs) == 4
roles = [m.get("role") for m in msgs]
# Gemini model -> OpenAI assistant
assert "assistant" in roles and "user" in roles
# PROMPT system+user present (system may not be first without @pos; we just check existence)
assert any(m.get("role") == "system" for m in msgs), "System message from PROMPT must be present"
async def scenario_gemini_target_from_openai_messages():
print("\n=== PROMPT_COMBINE 2: target=gemini, incoming=openai.messages & PROMPT ===")
_patch_http_client()
CAPTURED.clear()
incoming_json = {
"messages": [
{"role": "system", "content": "Системный-тест из входящего"},
{"role": "user", "content": "Its just me.."},
{"role": "assistant", "content": "Reply from model"},
]
}
p = _mk_pipeline("gemini", "[[VAR:incoming.json.messages]] & [[PROMPT]]")
out = await PipelineExecutor(p).run(_ctx_with_incoming(incoming_json, vendor="openai"))
print("PIPE OUT:", _pp(out))
assert CAPTURED, "No HTTP request captured"
payload = CAPTURED[-1]["payload"]
# Validate Gemini body
assert "contents" in payload, "Gemini payload must contain contents"
cnts = payload["contents"]
assert isinstance(cnts, list)
# PROMPT system goes to systemInstruction, user block goes to contents
assert "systemInstruction" in payload, "Gemini payload must contain systemInstruction when system text exists"
si = payload["systemInstruction"]
# SystemInstruction.parts[].text must include both incoming system and PROMPT system merged
si_texts = []
try:
for prt in si.get("parts", []):
t = prt.get("text")
if isinstance(t, str) and t.strip():
si_texts.append(t.strip())
except Exception:
pass
joined = "\n".join(si_texts)
assert "Системный-тест из входящего" in joined, "Incoming system must be merged into systemInstruction"
assert "Narrator-chan" in joined, "PROMPT system must be merged into systemInstruction"
async def scenario_claude_target_from_openai_messages():
print("\n=== PROMPT_COMBINE 3: target=claude, incoming=openai.messages & PROMPT ===")
_patch_http_client()
CAPTURED.clear()
incoming_json = {
"messages": [
{"role": "system", "content": "Системный-тест CLAUDE"},
{"role": "user", "content": "Прив"},
{"role": "assistant", "content": "Привет!"},
]
}
p = _mk_pipeline("claude", "[[VAR:incoming.json.messages]] & [[PROMPT]]")
out = await PipelineExecutor(p).run(_ctx_with_incoming(incoming_json, vendor="openai"))
print("PIPE OUT:", _pp(out))
assert CAPTURED, "No HTTP request captured"
payload = CAPTURED[-1]["payload"]
# Validate Claude body
assert "messages" in payload, "Claude payload must contain messages"
assert "system" in payload, "Claude payload must contain system blocks"
sys_blocks = payload["system"]
# system must be array of blocks with type=text
assert isinstance(sys_blocks, list) and any(isinstance(b, dict) and b.get("type") == "text" for b in sys_blocks)
sys_text_join = "\n".join([b.get("text") for b in sys_blocks if isinstance(b, dict) and isinstance(b.get("text"), str)])
assert "Системный-тест CLAUDE" in sys_text_join, "Incoming system should be present"
assert "Narrator-chan" in sys_text_join, "PROMPT system should be present"
async def scenario_prepend_positioning_openai():
print("\n=== PROMPT_COMBINE 4: target=openai, PROMPT@pos=prepend & incoming.contents ===")
_patch_http_client()
CAPTURED.clear()
incoming_json = {
"contents": [
{"role": "user", "parts": [{"text": "A"}]},
{"role": "model", "parts": [{"text": "B"}]},
]
}
# Put PROMPT first; ensure system message becomes first in messages
p = _mk_pipeline("openai", "[[PROMPT]]@pos=prepend & [[VAR:incoming.json.contents]]")
out = await PipelineExecutor(p).run(_ctx_with_incoming(incoming_json, vendor="gemini"))
print("PIPE OUT:", _pp(out))
assert CAPTURED, "No HTTP request captured"
payload = CAPTURED[-1]["payload"]
msgs = payload.get("messages", [])
assert isinstance(msgs, list) and len(msgs) >= 2
first = msgs[0]
# Expect first to be system (from PROMPT) due to prepend
assert first.get("role") == "system", f"Expected system as first message, got {first}"
def test_prompt_combine_all():
async def main():
await scenario_openai_target_from_gemini_contents()
await scenario_gemini_target_from_openai_messages()
await scenario_claude_target_from_openai_messages()
await scenario_prepend_positioning_openai()
print("\n=== PROMPT_COMBINE: DONE ===")
asyncio.run(main())

View File

@@ -0,0 +1,23 @@
# Pytest wrapper for existing scenarios that run themselves via run_all()/run_checks()
# Lets all tests run with a single command: python -m pytest -q
# We do not change the original files, we just call their public functions from pytest tests.
def test_executor_iterative():
# tests/test_executor_iterative.py provides run_checks() (it calls asyncio.run itself)
from tests.test_executor_iterative import run_checks
run_checks()
def test_edge_cases():
# tests/test_edge_cases.py provides run_all() (it calls asyncio.run itself)
from tests.test_edge_cases import run_all
run_all()
def test_macros_and_vars():
# tests/test_macros_vars.py provides run_all() (it calls asyncio.run itself)
from tests.test_macros_vars import run_all
run_all()
def test_while_nodes():
# our new scenario suite; it exposes run_all() with its own asyncio.run
from tests.test_while_nodes import run_all
run_all()

134
tests/test_while_nodes.py Normal file
View File

@@ -0,0 +1,134 @@
import asyncio
from agentui.pipeline.executor import PipelineExecutor
from tests.utils import ctx as _ctx
async def scenario_providercall_while_ignore():
# ProviderCall with while loop and ignore_errors enabled.
# No base_url is provided to force ExecutionError inside node.run();
# wrapper will catch it and expose {"error": "..."} plus vars.
p = {
"id": "p_pc_while_ignore",
"name": "ProviderCall while+ignore",
"loop_mode": "dag",
"nodes": [
{
"id": "n2",
"type": "ProviderCall",
"config": {
"provider": "openai",
# while: 3 iterations (0,1,2)
"while_expr": "cycleindex < 3",
"while_max_iters": 10,
"ignore_errors": True,
# no base_url / provider_configs to trigger error safely
},
"in": {}
}
]
}
out = await PipelineExecutor(p).run(_ctx())
assert isinstance(out, dict)
# The wrapper returns the final out; the executor also merges .vars into STORE, but we assert on the node's out.
vars_map = out.get("vars") or {}
assert isinstance(vars_map, dict)
# Final iteration index should be 2
assert vars_map.get("WAS_ERROR__n2") is True
assert vars_map.get("CYCLEINDEX__n2") == 2
async def scenario_rawforward_while_ignore():
# RawForward with while loop and ignore_errors enabled.
# No base_url and incoming.json is a plain string -> detect_vendor=unknown -> ExecutionError,
# wrapper catches and returns {"error": "..."} with vars set.
p = {
"id": "p_rf_while_ignore",
"name": "RawForward while+ignore",
"loop_mode": "dag",
"nodes": [
{
"id": "n1",
"type": "RawForward",
"config": {
"while_expr": "cycleindex < 2",
"while_max_iters": 10,
"ignore_errors": True,
# no base_url; vendor detect will fail on plain text
},
"in": {}
}
]
}
ctx = _ctx()
# Provide incoming as plain text-like JSON so detect_vendor returns unknown
ctx["incoming"] = {
"method": "POST",
"url": "http://example.local/test",
"path": "/test",
"query": "",
"headers": {"content-type": "text/plain"},
"json": "raw-plain-body-simulated"
}
out = await PipelineExecutor(p).run(ctx)
assert isinstance(out, dict)
vars_map = out.get("vars") or {}
assert isinstance(vars_map, dict)
# Final iteration index should be 1 (0 and 1)
assert vars_map.get("WAS_ERROR__n1") is True
assert vars_map.get("CYCLEINDEX__n1") == 1
async def scenario_providercall_while_with_out_macro():
# SetVars -> ProviderCall while uses OUT from n1 in expression
# Expression: ([[OUT:n1.vars.MSG]] contains "123") && (cycleindex < 2)
# Ignore errors to bypass real HTTP
p = {
"id": "p_pc_while_out_macro",
"name": "ProviderCall while with OUT macro",
"loop_mode": "iterative",
"nodes": [
{
"id": "n1",
"type": "SetVars",
"config": {
"variables": [
{"id": "v1", "name": "MSG", "mode": "string", "value": "abc123xyz"}
]
},
"in": {}
},
{
"id": "n2",
"type": "ProviderCall",
"config": {
"provider": "openai",
"while_expr": "([[OUT:n1.vars.MSG]] contains \"123\") && (cycleindex < 2)",
"while_max_iters": 10,
"ignore_errors": True
},
"in": {
"depends": "n1.done"
}
}
]
}
out = await PipelineExecutor(p).run(_ctx())
assert isinstance(out, dict)
vars_map = out.get("vars") or {}
assert isinstance(vars_map, dict)
# Since MSG contains "123" and cycleindex < 2, two iterations (0,1)
assert vars_map.get("WAS_ERROR__n2") is True
assert vars_map.get("CYCLEINDEX__n2") == 1
def run_all():
async def main():
await scenario_providercall_while_ignore()
await scenario_rawforward_while_ignore()
await scenario_providercall_while_with_out_macro()
print("\n=== WHILE_NODES: DONE ===")
asyncio.run(main())
if __name__ == "__main__":
run_all()
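# Quick reference for the loop arithmetic asserted above (assuming cycleindex starts
# at 0 and while_expr is re-checked before each iteration):
#   "cycleindex < 3" -> iterations 0, 1, 2 -> final CYCLEINDEX__<node> == 2
#   "cycleindex < 2" -> iterations 0, 1    -> final CYCLEINDEX__<node> == 1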

52
tests/utils.py Normal file
View File

@@ -0,0 +1,52 @@
from __future__ import annotations
import json
from typing import Any, Dict, Optional
def pp(obj: Any, max_len: int = 800) -> str:
"""
Pretty-print JSON-like objects in tests with length guard.
"""
try:
s = json.dumps(obj, ensure_ascii=False, indent=2)
except Exception:
s = str(obj)
if len(s) > max_len:
return s[:max_len] + "...<truncated>"
return s
def base_ctx(vendor: str = "openai") -> Dict[str, Any]:
"""
Base context used by edge-case tests (mirrors previous _base_ctx).
"""
return {
"model": "gpt-x",
"vendor_format": vendor,
"params": {"temperature": 0.1},
"chat": {"last_user": "hi"},
"OUT": {},
}
def ctx(vendor: str = "openai", incoming: Optional[Dict[str, Any]] = None, params: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
"""
General context used by macros/vars tests (mirrors previous _ctx).
"""
return {
"model": "gpt-x",
"vendor_format": vendor,
"params": params or {"temperature": 0.25},
"chat": {"last_user": "Привет"},
"OUT": {},
"incoming": incoming
or {
"method": "POST",
"url": "http://localhost/test",
"path": "/test",
"query": "",
"headers": {"x": "X-HEADER"},
"json": {},
},
}
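
For orientation, a minimal sketch (hypothetical demo, not part of the suite) of how ctx() and pp() combine with PipelineExecutor, mirroring the scenario tests above:

import asyncio
from agentui.pipeline.executor import PipelineExecutor
from tests.utils import ctx, pp

def demo():
    # Single SetVars node; the config shape mirrors tests/test_while_nodes.py
    p = {
        "id": "p_utils_demo",
        "loop_mode": "dag",
        "nodes": [
            {
                "id": "n1",
                "type": "SetVars",
                "config": {"variables": [{"id": "v1", "name": "MSG", "mode": "string", "value": "hello"}]},
                "in": {},
            }
        ],
    }
    out = asyncio.run(PipelineExecutor(p).run(ctx()))
    print(pp(out))

if __name__ == "__main__":
    demo()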