# HadTavern/tests/test_edge_cases.py
import asyncio
import json
from agentui.pipeline.executor import PipelineExecutor, ExecutionError, Node, NODE_REGISTRY
from tests.utils import pp as _pp, base_ctx as _base_ctx
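
# Edge-case scenarios for PipelineExecutor: If-expression quoting and tokenizer
# errors, multi-dependency arrays, gate-only dependencies, an empty [[PROMPT]]
# in a ProviderCall template, and RawForward vendor autodetection on a non-JSON
# body. Each scenario prints OK / ERR (or EXPECTED_ERROR) instead of asserting.
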
async def scenario_if_single_quotes_ok():
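    """A single-quoted string literal inside an If expression should evaluate cleanly."""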
print("\n=== SCENARIO 1: If with single quotes ===")
p = {
"id": "p_if_single_quotes",
"name": "If Single Quotes",
"loop_mode": "iterative",
"nodes": [
{
"id": "n1",
"type": "SetVars",
"config": {
"variables": [
{"id": "v1", "name": "MSG", "mode": "string", "value": "Hello"}
]
},
"in": {}
},
{
"id": "nIf",
"type": "If",
"config": {
"expr": "[[MSG]] contains 'Hello'"
},
"in": {
"depends": "n1.done"
}
},
{
"id": "nRet",
"type": "Return",
"config": {
"target_format": "openai",
"text_template": "ok"
},
"in": {
"depends": "nIf.true"
}
}
]
}
exe = PipelineExecutor(p)
try:
out = await exe.run(_base_ctx())
print("OK:", _pp(out))
except Exception as e:
print("ERR:", type(e).__name__, str(e))
async def scenario_if_error_logging():
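    """An unterminated string literal in an If expression should surface a tokenizer error."""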
print("\n=== SCENARIO 2: If with unterminated string (expect error log) ===")
p = {
"id": "p_if_bad_string",
"name": "If Bad String",
"loop_mode": "iterative",
"nodes": [
{
"id": "n1",
"type": "SetVars",
"config": {
"variables": [
{"id": "v1", "name": "MSG", "mode": "string", "value": "Hello"}
]
},
"in": {}
},
{
"id": "nIf",
"type": "If",
"config": {
# Missing closing quote to force tokenizer error
"expr": "[[MSG]] contains 'Hello"
},
"in": {
"depends": "n1.done"
}
},
{
"id": "nRet",
"type": "Return",
"config": {
"target_format": "openai",
"text_template": "should_not_run"
},
"in": {
"depends": "nIf.true"
}
}
]
}
exe = PipelineExecutor(p)
try:
out = await exe.run(_base_ctx())
print("UNEXPECTED_OK:", _pp(out))
except Exception as e:
print("EXPECTED_ERROR:", type(e).__name__, str(e))
async def scenario_multi_depends_array():
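    """A node may declare multiple dependencies as an array ([n2.done, n3.done])."""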
print("\n=== SCENARIO 3: multi-depends array ===")
# n2 and n3 both depend on n1; n4 depends on [n2.done, n3.done]
p = {
"id": "p_multi_depends",
"name": "Multi Depends",
"loop_mode": "iterative",
"nodes": [
{
"id": "n1",
"type": "SetVars",
"config": {
"variables": [
{"id": "v1", "name": "A", "mode": "string", "value": "foo"},
{"id": "v2", "name": "B", "mode": "string", "value": "bar"}
]
},
"in": {}
},
{
"id": "n2",
"type": "Return",
"config": {
"target_format": "openai",
"text_template": "[[A]]"
},
"in": {
"depends": "n1.done"
}
},
{
"id": "n3",
"type": "Return",
"config": {
"target_format": "openai",
"text_template": "[[B]]"
},
"in": {
"depends": "n1.done"
}
},
{
"id": "n4",
"type": "Return",
"config": {
"target_format": "openai",
"text_template": "[[OUT2]] + [[OUT3]]"
},
"in": {
"depends": ["n2.done", "n3.done"]
}
}
]
}
exe = PipelineExecutor(p)
try:
out = await exe.run(_base_ctx())
print("OK:", _pp(out))
except Exception as e:
print("ERR:", type(e).__name__, str(e))
async def scenario_gate_only_dependency():
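    """A node whose only dependency is a gate output (nIf.true) should run only when the gate becomes true."""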
print("\n=== SCENARIO 4: gate-only dependency (no real parents) ===")
# nThen depends only on nIf.true (should run only when gate becomes true)
p = {
"id": "p_gate_only",
"name": "Gate Only",
"loop_mode": "iterative",
"nodes": [
{
"id": "n1",
"type": "SetVars",
"config": {
"variables": [
{"id": "v1", "name": "FLAG", "mode": "string", "value": "yes"}
]
},
"in": {}
},
{
"id": "nIf",
"type": "If",
"config": {
"expr": "[[FLAG]] == 'yes'"
},
"in": {
"depends": "n1.done"
}
},
{
"id": "nThen",
"type": "Return",
"config": {
"target_format": "openai",
"text_template": "then-branch"
},
"in": {
# gate-only
"depends": "nIf.true"
}
}
]
}
exe = PipelineExecutor(p)
try:
out = await exe.run(_base_ctx())
print("OK:", _pp(out))
except Exception as e:
print("ERR:", type(e).__name__, str(e))
async def scenario_provider_prompt_empty_json_error():
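    """An empty [[PROMPT]] should leave the provider template with invalid JSON, failing before any network call."""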
print("\n=== SCENARIO 5: ProviderCall with empty PROMPT causing JSON error (collect logs) ===")
# Template has [[PROMPT]] surrounded by commas; blocks are empty => PROMPT = ""
# Resulting JSON likely invalid -> ExecutionError expected before any network call.
p = {
"id": "p_prompt_empty",
"name": "Prompt Empty JSON Error",
"loop_mode": "dag",
"nodes": [
{
"id": "n1",
"type": "ProviderCall",
"config": {
"provider": "openai",
"provider_configs": {
"openai": {
"base_url": "https://api.openai.com",
"endpoint": "/v1/chat/completions",
"headers": "{\"Authorization\":\"Bearer TEST\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"temperature\": 0.1\n}"
}
},
"blocks": [] # empty -> PROMPT empty
},
"in": {}
}
]
}
exe = PipelineExecutor(p)
try:
out = await exe.run(_base_ctx())
print("UNEXPECTED_OK:", _pp(out))
except Exception as e:
print("EXPECTED_ERROR:", type(e).__name__, str(e))
async def scenario_rawforward_vendor_unknown():
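    """A non-JSON incoming body should defeat RawForward vendor autodetection and raise an execution error."""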
print("\n=== SCENARIO 6: RawForward vendor unknown (non-JSON body simulated) ===")
# We simulate incoming.json as a plain string that doesn't look like any known vendor payload.
# RawForward will try vendor detect, fail and raise ExecutionError (collect logs, do not fix).
p = {
"id": "p_rawforward_unknown",
"name": "RawForward Unknown",
"loop_mode": "dag",
"nodes": [
{
"id": "n1",
"type": "RawForward",
"config": {
# No base_url -> autodetect vendor from incoming.json (will fail)
"passthrough_headers": True,
"extra_headers": "{}"
},
"in": {}
}
]
}
exe = PipelineExecutor(p)
ctx = _base_ctx()
ctx["incoming"] = {
"method": "POST",
"url": "http://example.test/whatever",
"path": "/whatever",
"query": "",
"headers": {"Content-Type": "text/plain"},
"json": "raw-plain-body-simulated" # NOT JSON object -> detect_vendor -> unknown
}
try:
out = await exe.run(ctx)
print("UNEXPECTED_OK:", _pp(out))
except Exception as e:
print("EXPECTED_ERROR:", type(e).__name__, str(e))
def run_all():
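    """Run all edge-case scenarios sequentially on a fresh event loop."""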
    async def main():
        await scenario_if_single_quotes_ok()
        await scenario_if_error_logging()
        await scenario_multi_depends_array()
        await scenario_gate_only_dependency()
        await scenario_provider_prompt_empty_json_error()
        await scenario_rawforward_vendor_unknown()
        print("\n=== EDGE CASES: DONE ===")

    asyncio.run(main())


if __name__ == "__main__":
    run_all()