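"""Smoke checks for ProviderCallNode: rendering prompt blocks into unified
messages and converting them to vendor-specific payloads (OpenAI, Gemini,
Claude)."""
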
from agentui.pipeline.executor import ProviderCallNode


def run_checks():
    # Node config with prompt blocks: "{{ model }}" is a template placeholder
    # and "[[VAR:chat.last_user]]" pulls a value from the execution context.
    blocks = [
        {"id": "b1", "name": "Sys", "role": "system", "prompt": "System: {{ model }}", "enabled": True, "order": 0},
        {"id": "b2", "name": "UserMsg", "role": "user", "prompt": "User says [[VAR:chat.last_user]]", "enabled": True, "order": 1},
        {"id": "b3", "name": "Asst", "role": "assistant", "prompt": "Prev assistant turn", "enabled": True, "order": 2},
    ]
    node = ProviderCallNode("test", {"blocks": blocks})

    context = {
        "model": "gpt-x",
        "params": {"temperature": 0.4, "max_tokens": 100, "top_p": 0.9, "stop": ["STOP"]},
        "chat": {"last_user": "Привет"},
        "OUT": {},
        "vendor_format": "openai",
    }

    # 1) Render prompt blocks into unified messages
    unified = node._render_blocks_to_unified(context)
    assert len(unified) == 3
    assert unified[0]["role"] == "system" and unified[0]["content"] == "System: gpt-x"
    assert unified[1]["role"] == "user" and "Привет" in unified[1]["content"]
    assert unified[2]["role"] == "assistant"
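
    # Per the assertions above, `unified` presumably looks like this (the
    # exact rendered user text is an assumption; only the substring "Привет"
    # is asserted):
    #   [{"role": "system", "content": "System: gpt-x"},
    #    {"role": "user", "content": "User says Привет"},
    #    {"role": "assistant", "content": "Prev assistant turn"}]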

    # 2) OpenAI: roles map 1:1 and params keep their original names
    p_openai = node._messages_to_payload("openai", unified, context)
    assert p_openai["model"] == "gpt-x"
    assert isinstance(p_openai["messages"], list) and len(p_openai["messages"]) == 3
    assert p_openai["messages"][0]["role"] == "system"
    assert p_openai["messages"][1]["role"] == "user" and "Привет" in p_openai["messages"][1]["content"]
    assert p_openai["temperature"] == 0.4
    assert p_openai["max_tokens"] == 100
    assert p_openai["top_p"] == 0.9
    assert p_openai["stop"] == ["STOP"]

    # 3) Gemini
    p_gemini = node._messages_to_payload("gemini", unified, context)
    assert p_gemini["model"] == "gpt-x"
    assert "contents" in p_gemini and isinstance(p_gemini["contents"], list)
    # The system message goes into systemInstruction
    assert "systemInstruction" in p_gemini and "parts" in p_gemini["systemInstruction"]
    assert p_gemini["systemInstruction"]["parts"][0]["text"] == "System: gpt-x"
    # user/assistant -> contents (assistant => role=model)
    roles = [c["role"] for c in p_gemini["contents"]]
    assert roles == ["user", "model"]
    assert "Привет" in p_gemini["contents"][0]["parts"][0]["text"]
    # Sampling params are renamed into generationConfig
    gen = p_gemini.get("generationConfig", {})
    assert gen.get("temperature") == 0.4
    assert gen.get("maxOutputTokens") == 100
    assert gen.get("topP") == 0.9
    assert gen.get("stopSequences") == ["STOP"]

    # 4) Claude: the system prompt becomes a top-level "system" string and
    # message content is a list of typed blocks
    p_claude = node._messages_to_payload("claude", unified, context)
    assert p_claude["model"] == "gpt-x"
    assert p_claude["system"] == "System: gpt-x"
    assert isinstance(p_claude["messages"], list)
    assert p_claude["messages"][0]["role"] == "user"
    assert p_claude["messages"][0]["content"][0]["type"] == "text"
    assert "Привет" in p_claude["messages"][0]["content"][0]["text"]
    assert p_claude["temperature"] == 0.4
    assert p_claude["max_tokens"] == 100
    assert p_claude["top_p"] == 0.9
    assert p_claude["stop"] == ["STOP"]


if __name__ == "__main__":
    run_checks()
    print("Prompt Manager payload conversion tests: OK")