This commit is contained in:
2025-09-11 17:27:15 +03:00
parent 3c77c3dc2e
commit 11a0535712
32 changed files with 4682 additions and 442 deletions

313
tests/test_edge_cases.py Normal file
View File

@@ -0,0 +1,313 @@
import asyncio
import json
from agentui.pipeline.executor import PipelineExecutor, ExecutionError, Node, NODE_REGISTRY
# Helper to pretty print short JSON safely
def _pp(obj, max_len=800):
try:
s = json.dumps(obj, ensure_ascii=False, indent=2)
except Exception:
s = str(obj)
if len(s) > max_len:
return s[:max_len] + "...<truncated>"
return s
def _base_ctx(vendor="openai"):
return {
"model": "gpt-x",
"vendor_format": vendor,
"params": {"temperature": 0.1},
"chat": {"last_user": "hi"},
"OUT": {},
}
async def scenario_if_single_quotes_ok():
    """SCENARIO 1: an If expression with single-quoted literals should evaluate."""
    print("\n=== SCENARIO 1: If with single quotes ===")
    setter = {
        "id": "n1",
        "type": "SetVars",
        "config": {
            "variables": [
                {"id": "v1", "name": "MSG", "mode": "string", "value": "Hello"}
            ]
        },
        "in": {},
    }
    gate = {
        "id": "nIf",
        "type": "If",
        "config": {"expr": "[[MSG]] contains 'Hello'"},
        "in": {"depends": "n1.done"},
    }
    returner = {
        "id": "nRet",
        "type": "Return",
        "config": {"target_format": "openai", "text_template": "ok"},
        "in": {"depends": "nIf.true"},
    }
    pipeline = {
        "id": "p_if_single_quotes",
        "name": "If Single Quotes",
        "loop_mode": "iterative",
        "nodes": [setter, gate, returner],
    }
    executor = PipelineExecutor(pipeline)
    try:
        result = await executor.run(_base_ctx())
    except Exception as exc:
        print("ERR:", type(exc).__name__, str(exc))
    else:
        print("OK:", _pp(result))
async def scenario_if_error_logging():
    """SCENARIO 2: an unterminated string in If.expr must surface as an error."""
    print("\n=== SCENARIO 2: If with unterminated string (expect error log) ===")
    setter = {
        "id": "n1",
        "type": "SetVars",
        "config": {
            "variables": [
                {"id": "v1", "name": "MSG", "mode": "string", "value": "Hello"}
            ]
        },
        "in": {},
    }
    # The closing quote is deliberately missing to force a tokenizer error.
    broken_gate = {
        "id": "nIf",
        "type": "If",
        "config": {"expr": "[[MSG]] contains 'Hello"},
        "in": {"depends": "n1.done"},
    }
    unreachable = {
        "id": "nRet",
        "type": "Return",
        "config": {"target_format": "openai", "text_template": "should_not_run"},
        "in": {"depends": "nIf.true"},
    }
    pipeline = {
        "id": "p_if_bad_string",
        "name": "If Bad String",
        "loop_mode": "iterative",
        "nodes": [setter, broken_gate, unreachable],
    }
    executor = PipelineExecutor(pipeline)
    try:
        result = await executor.run(_base_ctx())
    except Exception as exc:
        print("EXPECTED_ERROR:", type(exc).__name__, str(exc))
    else:
        print("UNEXPECTED_OK:", _pp(result))
async def scenario_multi_depends_array():
    """SCENARIO 3: a node may depend on a list of ports (fan-in join)."""
    print("\n=== SCENARIO 3: multi-depends array ===")
    # n2 and n3 both depend on n1; n4 joins on [n2.done, n3.done].
    setter = {
        "id": "n1",
        "type": "SetVars",
        "config": {
            "variables": [
                {"id": "v1", "name": "A", "mode": "string", "value": "foo"},
                {"id": "v2", "name": "B", "mode": "string", "value": "bar"},
            ]
        },
        "in": {},
    }
    ret_a = {
        "id": "n2",
        "type": "Return",
        "config": {"target_format": "openai", "text_template": "[[A]]"},
        "in": {"depends": "n1.done"},
    }
    ret_b = {
        "id": "n3",
        "type": "Return",
        "config": {"target_format": "openai", "text_template": "[[B]]"},
        "in": {"depends": "n1.done"},
    }
    joiner = {
        "id": "n4",
        "type": "Return",
        "config": {"target_format": "openai", "text_template": "[[OUT2]] + [[OUT3]]"},
        "in": {"depends": ["n2.done", "n3.done"]},
    }
    pipeline = {
        "id": "p_multi_depends",
        "name": "Multi Depends",
        "loop_mode": "iterative",
        "nodes": [setter, ret_a, ret_b, joiner],
    }
    executor = PipelineExecutor(pipeline)
    try:
        result = await executor.run(_base_ctx())
    except Exception as exc:
        print("ERR:", type(exc).__name__, str(exc))
    else:
        print("OK:", _pp(result))
async def scenario_gate_only_dependency():
    """SCENARIO 4: a node whose only parent is a gate port (nIf.true)."""
    print("\n=== SCENARIO 4: gate-only dependency (no real parents) ===")
    # nThen depends solely on nIf.true, so it should run only once the gate opens.
    setter = {
        "id": "n1",
        "type": "SetVars",
        "config": {
            "variables": [
                {"id": "v1", "name": "FLAG", "mode": "string", "value": "yes"}
            ]
        },
        "in": {},
    }
    gate = {
        "id": "nIf",
        "type": "If",
        "config": {"expr": "[[FLAG]] == 'yes'"},
        "in": {"depends": "n1.done"},
    }
    then_branch = {
        "id": "nThen",
        "type": "Return",
        "config": {"target_format": "openai", "text_template": "then-branch"},
        # gate-only dependency
        "in": {"depends": "nIf.true"},
    }
    pipeline = {
        "id": "p_gate_only",
        "name": "Gate Only",
        "loop_mode": "iterative",
        "nodes": [setter, gate, then_branch],
    }
    executor = PipelineExecutor(pipeline)
    try:
        result = await executor.run(_base_ctx())
    except Exception as exc:
        print("ERR:", type(exc).__name__, str(exc))
    else:
        print("OK:", _pp(result))
async def scenario_provider_prompt_empty_json_error():
    """SCENARIO 5: empty prompt blocks make the provider template render bad JSON."""
    print("\n=== SCENARIO 5: ProviderCall with empty PROMPT causing JSON error (collect logs) ===")
    # [[PROMPT]] sits between commas in the template; with no blocks it renders
    # empty, so the body should fail JSON validation before any network call.
    provider_node = {
        "id": "n1",
        "type": "ProviderCall",
        "config": {
            "provider": "openai",
            "provider_configs": {
                "openai": {
                    "base_url": "https://api.openai.com",
                    "endpoint": "/v1/chat/completions",
                    "headers": "{\"Authorization\":\"Bearer TEST\"}",
                    "template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"temperature\": 0.1\n}",
                }
            },
            "blocks": [],  # empty -> PROMPT renders empty
        },
        "in": {},
    }
    pipeline = {
        "id": "p_prompt_empty",
        "name": "Prompt Empty JSON Error",
        "loop_mode": "dag",
        "nodes": [provider_node],
    }
    executor = PipelineExecutor(pipeline)
    try:
        result = await executor.run(_base_ctx())
    except Exception as exc:
        print("EXPECTED_ERROR:", type(exc).__name__, str(exc))
    else:
        print("UNEXPECTED_OK:", _pp(result))
async def scenario_rawforward_vendor_unknown():
    """SCENARIO 6: RawForward cannot autodetect a vendor from a non-object body."""
    print("\n=== SCENARIO 6: RawForward vendor unknown (non-JSON body simulated) ===")
    # incoming.json is a plain string that matches no known vendor payload, so
    # vendor detection should fail and raise ExecutionError (collect logs, do not fix).
    forward_node = {
        "id": "n1",
        "type": "RawForward",
        "config": {
            # No base_url -> autodetect vendor from incoming.json (will fail)
            "passthrough_headers": True,
            "extra_headers": "{}",
        },
        "in": {},
    }
    pipeline = {
        "id": "p_rawforward_unknown",
        "name": "RawForward Unknown",
        "loop_mode": "dag",
        "nodes": [forward_node],
    }
    ctx = _base_ctx()
    ctx["incoming"] = {
        "method": "POST",
        "url": "http://example.test/whatever",
        "path": "/whatever",
        "query": "",
        "headers": {"Content-Type": "text/plain"},
        "json": "raw-plain-body-simulated",  # NOT a JSON object -> detect_vendor -> unknown
    }
    executor = PipelineExecutor(pipeline)
    try:
        result = await executor.run(ctx)
    except Exception as exc:
        print("EXPECTED_ERROR:", type(exc).__name__, str(exc))
    else:
        print("UNEXPECTED_OK:", _pp(result))
def run_all():
    """Run every edge-case scenario sequentially inside a single event loop."""
    async def main():
        scenarios = (
            scenario_if_single_quotes_ok,
            scenario_if_error_logging,
            scenario_multi_depends_array,
            scenario_gate_only_dependency,
            scenario_provider_prompt_empty_json_error,
            scenario_rawforward_vendor_unknown,
        )
        for scenario in scenarios:
            await scenario()
        print("\n=== EDGE CASES: DONE ===")
    asyncio.run(main())


if __name__ == "__main__":
    run_all()

View File

@@ -0,0 +1,167 @@
import asyncio
from agentui.pipeline.executor import PipelineExecutor, ExecutionError, Node, NODE_REGISTRY
def run_checks():
    """Smoke-test the iterative pipeline executor.

    Covers three behaviors:
      1. a linear SetVars -> Return pipeline in iterative mode;
      2. If-gating (a Return that depends on nIf.true);
      3. [[OUT:...]] macros in node inputs being treated as real dependencies.

    Raises AssertionError on any mismatch; prints a success line otherwise.
    """
    async def scenario():
        # Test 1: linear pipeline in iterative mode (SetVars -> Return)
        p1 = {
            "id": "pipeline_test_iter_1",
            "name": "Iterative Linear",
            "loop_mode": "iterative",
            "loop_max_iters": 100,
            "loop_time_budget_ms": 5000,
            "nodes": [
                {
                    "id": "n1",
                    "type": "SetVars",
                    "config": {
                        "variables": [
                            {"id": "v1", "name": "MSG", "mode": "string", "value": "Hello"}
                        ]
                    },
                    "in": {}
                },
                {
                    "id": "n2",
                    "type": "Return",
                    "config": {
                        "target_format": "openai",
                        "text_template": "[[MSG]]"
                    },
                    "in": {
                        "depends": "n1.done"
                    }
                }
            ]
        }
        # Shared execution context; reused (and possibly mutated by the
        # executor) across all three test runs below.
        ctx = {
            "model": "gpt-x",
            "vendor_format": "openai",
            "params": {},
            "chat": {"last_user": "hi"},
            "OUT": {}
        }
        ex1 = PipelineExecutor(p1)
        out1 = await ex1.run(ctx)
        assert isinstance(out1, dict) and "result" in out1
        res1 = out1["result"]
        # OpenAI-like object from Return formatter
        assert res1.get("object") == "chat.completion"
        msg1 = res1.get("choices", [{}])[0].get("message", {}).get("content")
        assert msg1 == "Hello"
        # Test 2: If gating in iterative mode (SetVars -> If -> Return(true))
        p2 = {
            "id": "pipeline_test_iter_2",
            "name": "Iterative If Gate",
            "loop_mode": "iterative",
            "loop_max_iters": 100,
            "loop_time_budget_ms": 5000,
            "nodes": [
                {
                    "id": "n1",
                    "type": "SetVars",
                    "config": {
                        "variables": [
                            {"id": "v1", "name": "MSG", "mode": "string", "value": "Hello world"}
                        ]
                    },
                    "in": {}
                },
                {
                    "id": "nIf",
                    "type": "If",
                    "config": {
                        "expr": '[[MSG]] contains "Hello"'
                    },
                    "in": {
                        "depends": "n1.done"
                    }
                },
                {
                    "id": "nRet",
                    "type": "Return",
                    "config": {
                        "target_format": "openai",
                        "text_template": "[[MSG]] ok"
                    },
                    "in": {
                        "depends": "nIf.true"
                    }
                }
            ]
        }
        ex2 = PipelineExecutor(p2)
        out2 = await ex2.run(ctx)
        assert "result" in out2
        res2 = out2["result"]
        assert res2.get("object") == "chat.completion"
        msg2 = res2.get("choices", [{}])[0].get("message", {}).get("content")
        assert msg2 == "Hello world ok"
        # Test 3: [[OUT:...]] is treated as a real dependency in iterative mode
        class ProbeNode(Node):
            # Custom node that asserts it received n1's output via the
            # [[OUT:n1]] input macro, then republishes the value.
            type_name = "Probe"
            async def run(self, inputs, context):
                x = inputs.get("x")
                assert isinstance(x, dict) and isinstance(x.get("vars"), dict)
                v = x["vars"].get("MSG")
                assert v == "Hello OUT"
                return {"vars": {"X_MSG": v}}
        # Register probe node so the executor can instantiate it by type name
        NODE_REGISTRY[ProbeNode.type_name] = ProbeNode
        p3 = {
            "id": "pipeline_test_iter_3",
            "name": "Iterative OUT dependency",
            "loop_mode": "iterative",
            "loop_max_iters": 100,
            "loop_time_budget_ms": 5000,
            "nodes": [
                {
                    "id": "n1",
                    "type": "SetVars",
                    "config": {
                        "variables": [
                            {"id": "v1", "name": "MSG", "mode": "string", "value": "Hello OUT"}
                        ]
                    },
                    "in": {}
                },
                {
                    "id": "n2",
                    "type": "Probe",
                    "config": {},
                    "in": {
                        "x": "[[OUT:n1]]"
                    }
                },
                {
                    "id": "n3",
                    "type": "Return",
                    "config": {
                        "target_format": "openai",
                        "text_template": "[[VAR:vars.X_MSG]]"
                    },
                    "in": {
                        "depends": "n2.done"
                    }
                }
            ]
        }
        ex3 = PipelineExecutor(p3)
        out3 = await ex3.run(ctx)
        assert "result" in out3
        res3 = out3["result"]
        assert res3.get("object") == "chat.completion"
        msg3 = res3.get("choices", [{}])[0].get("message", {}).get("content")
        assert msg3 == "Hello OUT"
    asyncio.run(scenario())
    print("Iterative executor tests: OK")


if __name__ == "__main__":
    run_checks()

246
tests/test_macros_vars.py Normal file
View File

@@ -0,0 +1,246 @@
import asyncio
import json
from agentui.pipeline.executor import PipelineExecutor
from agentui.pipeline.storage import clear_var_store
def _pp(obj, max_len=800):
try:
s = json.dumps(obj, ensure_ascii=False, indent=2)
except Exception:
s = str(obj)
if len(s) > max_len:
return s[:max_len] + "...<truncated>"
return s
def _ctx(vendor="openai", incoming=None, params=None):
return {
"model": "gpt-x",
"vendor_format": vendor,
"params": params or {"temperature": 0.25},
"chat": {"last_user": "Привет"},
"OUT": {},
"incoming": incoming or {
"method": "POST",
"url": "http://localhost/test",
"path": "/test",
"query": "",
"headers": {"x": "X-HEADER"},
"json": {},
},
}
async def scenario_bare_vars_and_braces():
    """MACROS 1: bare [[NAME]] and {{ NAME }} macros, incl. unquoted numbers/objects."""
    print("\n=== MACROS 1: Bare [[NAME]] и {{ NAME }} + числа/объекты без кавычек ===")
    setter = {
        "id": "n1",
        "type": "SetVars",
        "config": {
            "variables": [
                {"id": "v1", "name": "STR", "mode": "string", "value": "строка"},
                {"id": "v2", "name": "NUM", "mode": "expr", "value": "42"},
                {"id": "v3", "name": "OBJ", "mode": "expr", "value": '{"x": 1, "y": [2,3]}'},
            ]
        },
        "in": {},
    }
    # Inject: a string via [[STR]], a number via {{ NUM }}, a dict via {{ OBJ }}.
    renderer = {
        "id": "n2",
        "type": "Return",
        "config": {
            "target_format": "openai",
            "text_template": "[[STR]] | {{ NUM }} | {{ OBJ }}",
        },
        "in": {"depends": "n1.done"},
    }
    pipeline = {
        "id": "p_macros_1",
        "name": "Bare and Braces",
        "loop_mode": "iterative",
        "nodes": [setter, renderer],
    }
    result = await PipelineExecutor(pipeline).run(_ctx())
    print("OUT:", _pp(result))
async def scenario_var_path_and_defaults():
    """MACROS 2: [[VAR:path]] lookups and nested {{ ...|default(...) }} filters."""
    print("\n=== MACROS 2: [[VAR:path]] и {{ ...|default(...) }} (вложенные и JSON-литералы) ===")
    incoming = {
        "method": "POST",
        "url": "http://localhost/test?foo=bar",
        "path": "/test",
        "query": "foo=bar",
        "headers": {"authorization": "Bearer X", "x-api-key": "Y"},
        "json": {"a": None},
    }
    template = (
        "auth=[[VAR:incoming.headers.authorization]] | "
        "xkey=[[VAR:incoming.headers.x-api-key]] | "
        # Nested default: params.a|default(123) -> 123 when the param is unset.
        "num={{ params.a|default(123) }} | "
        # Deeper default chain: incoming.json.a|default(params.a|default(456)).
        "num2={{ incoming.json.a|default(params.a|default(456)) }} | "
        # JSON-literal defaults: a list and an object.
        "lit_list={{ missing|default([1,2,3]) }} | lit_obj={{ missing2|default({\"k\":10}) }}"
    )
    pipeline = {
        "id": "p_macros_2",
        "name": "VAR and defaults",
        "loop_mode": "dag",
        "nodes": [
            {
                "id": "n1",
                "type": "Return",
                "config": {"target_format": "openai", "text_template": template},
                "in": {},
            }
        ],
    }
    result = await PipelineExecutor(pipeline).run(
        _ctx(incoming=incoming, params={"temperature": 0.2})
    )
    print("OUT:", _pp(result))
async def scenario_out_macros_full_and_short():
    """MACROS 3: full [[OUT:nX...]] paths and the short [[OUTx]] form."""
    print("\n=== MACROS 3: [[OUT:nX...]] и короткая форма [[OUTx]] ===")
    setter = {
        "id": "n1",
        "type": "SetVars",
        "config": {
            "variables": [
                {"id": "v1", "name": "MSG", "mode": "string", "value": "hello"}
            ]
        },
        "in": {},
    }
    first_return = {
        "id": "n2",
        "type": "Return",
        "config": {"target_format": "openai", "text_template": "[[MSG]]"},
        "in": {"depends": "n1.done"},
    }
    # Both forms: the full path from n1.vars.MSG and the short [[OUT2]] from n2.
    second_return = {
        "id": "n3",
        "type": "Return",
        "config": {
            "target_format": "openai",
            "text_template": "[[OUT:n1.vars.MSG]] + [[OUT2]]",
        },
        "in": {"depends": "n2.done"},
    }
    pipeline = {
        "id": "p_macros_3",
        "name": "OUT full and short",
        "loop_mode": "iterative",
        "nodes": [setter, first_return, second_return],
    }
    result = await PipelineExecutor(pipeline).run(_ctx())
    print("OUT:", _pp(result))
async def scenario_store_macros_two_runs():
    """MACROS 4: [[STORE:key]] / {{ STORE.key }} persist across runs when the store is kept."""
    print("\n=== MACROS 4: [[STORE:key]] и {{ STORE.key }} между запусками (clear_var_store=False) ===")
    pid = "p_macros_4_store"
    # Start from an empty store.
    clear_var_store(pid)
    writer_pipeline = {
        "id": pid,
        "name": "STORE across runs",
        "loop_mode": "iterative",
        "clear_var_store": False,  # critical: do not wipe the store between runs
        "nodes": [
            {
                "id": "n1",
                "type": "SetVars",
                "config": {
                    "variables": [
                        {"id": "v1", "name": "KEEP", "mode": "string", "value": "persist-me"}
                    ]
                },
                "in": {},
            },
            {
                "id": "n2",
                "type": "Return",
                "config": {"target_format": "openai", "text_template": "first-run"},
                "in": {"depends": "n1.done"},
            },
        ],
    }
    # First run: writes KEEP into STORE.
    out_first = await PipelineExecutor(writer_pipeline).run(_ctx())
    print("RUN1:", _pp(out_first))
    # Second run: reads the stored value back through both macro forms.
    reader_pipeline = {
        "id": pid,
        "name": "STORE read",
        "loop_mode": "dag",
        "clear_var_store": False,
        "nodes": [
            {
                "id": "n1",
                "type": "Return",
                "config": {
                    "target_format": "openai",
                    "text_template": "[[STORE:KEEP]] | {{ STORE.KEEP }}",
                },
                "in": {},
            }
        ],
    }
    out_second = await PipelineExecutor(reader_pipeline).run(_ctx())
    print("RUN2:", _pp(out_second))
async def scenario_pm_prompt_blocks_to_provider_structs():
    """MACROS 5: [[PROMPT]] with a block list expands to provider structures (OpenAI)."""
    print("\n=== MACROS 5: Prompt Blocks ([[PROMPT]]) → provider-structures (OpenAI) ===")
    # Checks that [[PROMPT]] plus the configured blocks becomes "messages":[...].
    provider_node = {
        "id": "n1",
        "type": "ProviderCall",
        "config": {
            "provider": "openai",
            "provider_configs": {
                "openai": {
                    "base_url": "https://api.openai.com",
                    "endpoint": "/v1/chat/completions",
                    "headers": "{\"Authorization\":\"Bearer TEST\"}",
                    "template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"temperature\": {{ params.temperature|default(0.7) }}\n}",
                }
            },
            "blocks": [
                {"id": "b1", "name": "sys", "role": "system", "prompt": "You are test", "enabled": True, "order": 0},
                {"id": "b2", "name": "user", "role": "user", "prompt": "Say [[VAR:chat.last_user]]", "enabled": True, "order": 1},
            ],
        },
        "in": {},
    }
    pipeline = {
        "id": "p_macros_5",
        "name": "PROMPT OpenAI",
        "loop_mode": "dag",
        "nodes": [provider_node],
    }
    # Builds and sends the request (a 401 is expected); what matters is a
    # valid body containing messages:[...].
    result = await PipelineExecutor(pipeline).run(_ctx())
    print("OUT:", _pp(result))
def run_all():
    """Execute the whole macros/vars suite inside a single event loop."""
    async def main():
        for scenario in (
            scenario_bare_vars_and_braces,
            scenario_var_path_and_defaults,
            scenario_out_macros_full_and_short,
            scenario_store_macros_two_runs,
            scenario_pm_prompt_blocks_to_provider_structs,
        ):
            await scenario()
        print("\n=== MACROS VARS SUITE: DONE ===")
    asyncio.run(main())


if __name__ == "__main__":
    run_all()