Compare commits

...

2 Commits

66 changed files with 11674 additions and 2060 deletions

View File

@@ -0,0 +1,40 @@
{
"WAS_ERROR__n2": true,
"CYCLEINDEX__n2": 0,
"snapshot": {
"incoming": {
"method": "POST",
"url": "http://localhost/test",
"path": "/test",
"query": "",
"headers": {
"x": "X-HEADER"
},
"json": {}
},
"params": {
"temperature": 0.25
},
"model": "gpt-x",
"vendor_format": "openai",
"system": "",
"OUT": {
"n2": {
"result": {
"error": "Cancelled by user (abort)"
},
"response_text": "",
"vars": {
"WAS_ERROR__n2": true,
"CYCLEINDEX__n2": 0
}
}
},
"OUT_TEXT": {
"n2": "Cancelled by user (abort)"
},
"LAST_NODE": "n2",
"OUT2": "Cancelled by user (abort)",
"EXEC_TRACE": "n2(ProviderCall)"
}
}
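
Each of these fixtures follows the same flat schema: per-node flags at the top level (WAS_ERROR__&lt;node&gt;, CYCLEINDEX__&lt;node&gt;) plus a snapshot object holding the captured incoming request, per-node results under OUT, extracted text under OUT_TEXT, the LAST_NODE pointer, and a human-readable EXEC_TRACE. A minimal sketch of reading one such snapshot in Python (the file name is hypothetical):

import json

# Load one of the snapshot fixtures above (hypothetical file name).
with open("snapshot_abort.json", encoding="utf-8") as f:
    snap = json.load(f)

node = snap["snapshot"]["LAST_NODE"]               # e.g. "n2"
errored = snap.get(f"WAS_ERROR__{node}", False)    # per-node error flag
text = snap["snapshot"]["OUT_TEXT"].get(node, "")  # extracted node text
print(node, errored, text)                         # -> n2 True Cancelled by user (abort)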

View File

@@ -0,0 +1,40 @@
{
"WAS_ERROR__n2": false,
"CYCLEINDEX__n2": 0,
"snapshot": {
"incoming": {
"method": "POST",
"url": "http://localhost/test",
"path": "/test",
"query": "",
"headers": {
"x": "X-HEADER"
},
"json": {}
},
"params": {
"temperature": 0.25
},
"model": "gpt-x",
"vendor_format": "openai",
"system": "",
"OUT": {
"n2": {
"result": {
"echo": {}
},
"response_text": "",
"vars": {
"WAS_ERROR__n2": false,
"CYCLEINDEX__n2": 0
}
}
},
"OUT_TEXT": {
"n2": ""
},
"LAST_NODE": "n2",
"OUT2": "",
"EXEC_TRACE": "n2(ProviderCall)"
}
}

View File

@@ -1,48 +0,0 @@
{
"VAL": 2,
"snapshot": {
"incoming": null,
"params": {},
"model": "gpt-x",
"vendor_format": "openai",
"system": "",
"OUT": {
"n1": {
"vars": {
"VAL": 2
}
},
"n2": {
"result": {
"id": "ret_mock_123",
"object": "chat.completion",
"model": "gpt-x",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "2"
},
"finish_reason": "stop"
}
],
"usage": {
"prompt_tokens": 0,
"completion_tokens": 1,
"total_tokens": 0
}
},
"response_text": "2"
}
},
"OUT_TEXT": {
"n1": "",
"n2": "2"
},
"LAST_NODE": "n2",
"OUT1": "",
"OUT2": "2",
"EXEC_TRACE": "n1(SetVars) -> n2(Return)"
}
}

View File

@@ -1,48 +0,0 @@
{
"TXT": "A | B | C",
"snapshot": {
"incoming": null,
"params": {},
"model": "gpt-x",
"vendor_format": "openai",
"system": "",
"OUT": {
"n1": {
"vars": {
"TXT": "A | B | C"
}
},
"n2": {
"result": {
"id": "ret_mock_123",
"object": "chat.completion",
"model": "gpt-x",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "A | B | C"
},
"finish_reason": "stop"
}
],
"usage": {
"prompt_tokens": 0,
"completion_tokens": 5,
"total_tokens": 0
}
},
"response_text": "A | B | C"
}
},
"OUT_TEXT": {
"n1": "A | B | C",
"n2": "A | B | C"
},
"LAST_NODE": "n2",
"OUT1": "A | B | C",
"OUT2": "A | B | C",
"EXEC_TRACE": "n1(SetVars) -> n2(Return)"
}
}

View File

@@ -19,21 +19,36 @@
"OUT": {
"n1": {
"result": {
"error": {
"message": "Incorrect API key provided: TEST. You can find your API key at https://platform.openai.com/account/api-keys.",
"type": "invalid_request_error",
"param": null,
"code": "invalid_api_key"
"echo": {
"url": "https://api.openai.com/v1/chat/completions",
"headers": {
"Content-Type": "application/json",
"Authorization": "Bearer TEST"
},
"payload": {
"model": "gpt-x",
"messages": [
{
"role": "system",
"content": "You are test"
},
{
"role": "user",
"content": "Say Привет"
}
],
"temperature": 0.25
}
}
},
"response_text": "Incorrect API key provided: TEST. You can find your API key at https://platform.openai.com/account/api-keys."
"response_text": "https://api.openai.com/v1/chat/completions"
}
},
"OUT_TEXT": {
"n1": "Incorrect API key provided: TEST. You can find your API key at https://platform.openai.com/account/api-keys."
"n1": "https://api.openai.com/v1/chat/completions"
},
"LAST_NODE": "n1",
"OUT1": "Incorrect API key provided: TEST. You can find your API key at https://platform.openai.com/account/api-keys.",
"OUT1": "https://api.openai.com/v1/chat/completions",
"EXEC_TRACE": "n1(ProviderCall)"
}
}

View File

@@ -16,29 +16,6 @@
"B": "bar"
}
},
"n2": {
"result": {
"id": "ret_mock_123",
"object": "chat.completion",
"model": "gpt-x",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "foo"
},
"finish_reason": "stop"
}
],
"usage": {
"prompt_tokens": 0,
"completion_tokens": 1,
"total_tokens": 0
}
},
"response_text": "foo"
},
"n3": {
"result": {
"id": "ret_mock_123",
@@ -61,17 +38,40 @@
}
},
"response_text": "bar"
},
"n2": {
"result": {
"id": "ret_mock_123",
"object": "chat.completion",
"model": "gpt-x",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "foo"
},
"finish_reason": "stop"
}
],
"usage": {
"prompt_tokens": 0,
"completion_tokens": 1,
"total_tokens": 0
}
},
"response_text": "foo"
}
},
"OUT_TEXT": {
"n1": "foo",
"n2": "foo",
"n3": "bar"
"n3": "bar",
"n2": "foo"
},
"LAST_NODE": "n2",
"LAST_NODE": "n3",
"OUT1": "foo",
"OUT2": "foo",
"OUT3": "bar",
"EXEC_TRACE": "n1(SetVars) -> n2(Return) -> n3(Return)"
"OUT2": "foo",
"EXEC_TRACE": "n1(SetVars) -> n3(Return) -> n2(Return)"
}
}

View File

@@ -0,0 +1,40 @@
{
"WAS_ERROR__n2": true,
"CYCLEINDEX__n2": 2,
"snapshot": {
"incoming": {
"method": "POST",
"url": "http://localhost/test",
"path": "/test",
"query": "",
"headers": {
"x": "X-HEADER"
},
"json": {}
},
"params": {
"temperature": 0.25
},
"model": "gpt-x",
"vendor_format": "openai",
"system": "",
"OUT": {
"n2": {
"result": {
"error": "Node n2 (ProviderCall) requires 'base_url' in config"
},
"response_text": "",
"vars": {
"WAS_ERROR__n2": true,
"CYCLEINDEX__n2": 2
}
}
},
"OUT_TEXT": {
"n2": "Node n2 (ProviderCall) requires 'base_url' in config"
},
"LAST_NODE": "n2",
"OUT2": "Node n2 (ProviderCall) requires 'base_url' in config",
"EXEC_TRACE": "n2(ProviderCall)"
}
}

View File

@@ -0,0 +1,48 @@
{
"MSG": "abc123xyz",
"WAS_ERROR__n2": true,
"CYCLEINDEX__n2": 1,
"snapshot": {
"incoming": {
"method": "POST",
"url": "http://localhost/test",
"path": "/test",
"query": "",
"headers": {
"x": "X-HEADER"
},
"json": {}
},
"params": {
"temperature": 0.25
},
"model": "gpt-x",
"vendor_format": "openai",
"system": "",
"OUT": {
"n1": {
"vars": {
"MSG": "abc123xyz"
}
},
"n2": {
"result": {
"error": "Node n2 (ProviderCall) requires 'base_url' in config"
},
"response_text": "",
"vars": {
"WAS_ERROR__n2": true,
"CYCLEINDEX__n2": 1
}
}
},
"OUT_TEXT": {
"n1": "abc123xyz",
"n2": "Node n2 (ProviderCall) requires 'base_url' in config"
},
"LAST_NODE": "n2",
"OUT1": "abc123xyz",
"OUT2": "Node n2 (ProviderCall) requires 'base_url' in config",
"EXEC_TRACE": "n1(SetVars) -> n2(ProviderCall)"
}
}

View File

@@ -0,0 +1,105 @@
{
"snapshot": {
"incoming": {
"method": "POST",
"url": "http://localhost/test",
"path": "/test",
"query": "",
"headers": {
"x": "X-HEADER"
},
"json": {
"messages": [
{
"role": "system",
"content": "Системный-тест CLAUDE"
},
{
"role": "user",
"content": "Прив"
},
{
"role": "assistant",
"content": "Привет!"
}
]
}
},
"params": {
"temperature": 0.25
},
"model": "gpt-x",
"vendor_format": "openai",
"system": "",
"OUT": {
"n1": {
"result": {
"echo": {
"url": "http://mock.local/v1/messages",
"headers": {
"Content-Type": "application/json"
},
"payload": {
"model": "gpt-x",
"system": [
{
"type": "text",
"text": "Ты — Narrator-chan."
},
{
"type": "text",
"text": "Системный-тест CLAUDE"
}
],
"messages": [
{
"role": "user",
"content": [
{
"type": "text",
"text": "Системный-тест CLAUDE"
}
]
},
{
"role": "user",
"content": [
{
"type": "text",
"text": "Прив"
}
]
},
{
"role": "assistant",
"content": [
{
"type": "text",
"text": "Привет!"
}
]
},
{
"role": "user",
"content": [
{
"type": "text",
"text": "как лела"
}
]
}
]
}
}
},
"response_text": "http://mock.local/v1/messages"
}
},
"OUT_TEXT": {
"n1": "http://mock.local/v1/messages"
},
"LAST_NODE": "n1",
"OUT1": "http://mock.local/v1/messages",
"EXEC_TRACE": "n1(ProviderCall)"
}
}

View File

@@ -0,0 +1,101 @@
{
"snapshot": {
"incoming": {
"method": "POST",
"url": "http://localhost/test",
"path": "/test",
"query": "",
"headers": {
"x": "X-HEADER"
},
"json": {
"messages": [
{
"role": "system",
"content": "Системный-тест из входящего"
},
{
"role": "user",
"content": "Its just me.."
},
{
"role": "assistant",
"content": "Reply from model"
}
]
}
},
"params": {
"temperature": 0.25
},
"model": "gpt-x",
"vendor_format": "openai",
"system": "",
"OUT": {
"n1": {
"result": {
"echo": {
"url": "http://mock.local/v1beta/models/gpt-x:generateContent",
"headers": {
"Content-Type": "application/json"
},
"payload": {
"model": "gpt-x",
"contents": [
{
"role": "user",
"parts": [
{
"text": "Системный-тест из входящего"
}
]
},
{
"role": "user",
"parts": [
{
"text": "Its just me.."
}
]
},
{
"role": "model",
"parts": [
{
"text": "Reply from model"
}
]
},
{
"role": "user",
"parts": [
{
"text": "как лела"
}
]
}
],
"systemInstruction": {
"parts": [
{
"text": "Ты — Narrator-chan."
},
{
"text": "Системный-тест из входящего"
}
]
}
}
}
},
"response_text": "http://mock.local/v1beta/models/gpt-x:generateContent"
}
},
"OUT_TEXT": {
"n1": "http://mock.local/v1beta/models/gpt-x:generateContent"
},
"LAST_NODE": "n1",
"OUT1": "http://mock.local/v1beta/models/gpt-x:generateContent",
"EXEC_TRACE": "n1(ProviderCall)"
}
}

View File

@@ -0,0 +1,79 @@
{
"snapshot": {
"incoming": {
"method": "POST",
"url": "http://localhost/test",
"path": "/test",
"query": "",
"headers": {
"x": "X-HEADER"
},
"json": {
"contents": [
{
"role": "user",
"parts": [
{
"text": "A"
}
]
},
{
"role": "model",
"parts": [
{
"text": "B"
}
]
}
]
}
},
"params": {
"temperature": 0.25
},
"model": "gpt-x",
"vendor_format": "gemini",
"system": "",
"OUT": {
"n1": {
"result": {
"echo": {
"url": "http://mock.local/v1/chat/completions",
"headers": {
"Content-Type": "application/json"
},
"payload": {
"model": "gpt-x",
"messages": [
{
"role": "system",
"content": "Ты — Narrator-chan."
},
{
"role": "user",
"content": "как лела"
},
{
"role": "user",
"content": "A"
},
{
"role": "assistant",
"content": "B"
}
]
}
}
},
"response_text": "http://mock.local/v1/chat/completions"
}
},
"OUT_TEXT": {
"n1": "http://mock.local/v1/chat/completions"
},
"LAST_NODE": "n1",
"OUT1": "http://mock.local/v1/chat/completions",
"EXEC_TRACE": "n1(ProviderCall)"
}
}

View File

@@ -10,21 +10,27 @@
"OUT": {
"n1": {
"result": {
"error": {
"message": "Incorrect API key provided: TEST. You can find your API key at https://platform.openai.com/account/api-keys.",
"type": "invalid_request_error",
"param": null,
"code": "invalid_api_key"
"echo": {
"url": "https://api.openai.com/v1/chat/completions",
"headers": {
"Content-Type": "application/json",
"Authorization": "Bearer TEST"
},
"payload": {
"model": "gpt-x",
"messages": [],
"temperature": 0.1
}
}
},
"response_text": "Incorrect API key provided: TEST. You can find your API key at https://platform.openai.com/account/api-keys."
"response_text": "https://api.openai.com/v1/chat/completions"
}
},
"OUT_TEXT": {
"n1": "Incorrect API key provided: TEST. You can find your API key at https://platform.openai.com/account/api-keys."
"n1": "https://api.openai.com/v1/chat/completions"
},
"LAST_NODE": "n1",
"OUT1": "Incorrect API key provided: TEST. You can find your API key at https://platform.openai.com/account/api-keys.",
"OUT1": "https://api.openai.com/v1/chat/completions",
"EXEC_TRACE": "n1(ProviderCall)"
}
}

View File

@@ -0,0 +1,40 @@
{
"WAS_ERROR__n1": true,
"CYCLEINDEX__n1": 1,
"snapshot": {
"incoming": {
"method": "POST",
"url": "http://example.local/test",
"path": "/test",
"query": "",
"headers": {
"content-type": "text/plain"
},
"json": "raw-plain-body-simulated"
},
"params": {
"temperature": 0.25
},
"model": "gpt-x",
"vendor_format": "openai",
"system": "",
"OUT": {
"n1": {
"result": {
"error": "Node n1 (RawForward): 'base_url' is not configured and vendor could not be detected."
},
"response_text": "",
"vars": {
"WAS_ERROR__n1": true,
"CYCLEINDEX__n1": 1
}
}
},
"OUT_TEXT": {
"n1": "Node n1 (RawForward): 'base_url' is not configured and vendor could not be detected."
},
"LAST_NODE": "n1",
"OUT1": "Node n1 (RawForward): 'base_url' is not configured and vendor could not be detected.",
"EXEC_TRACE": "n1(RawForward)"
}
}

View File

@@ -2,320 +2,6 @@
"Clod": "igrovik",
"MyOpenAiKey": "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",
"NAMETest": 192,
"snapshot": {
"incoming": {
"method": "POST",
"url": "http://127.0.0.1:7860/v1beta/models/gemini-2.5-pro:generateContent?key=%2A%2A%2A",
"path": "/v1beta/models/gemini-2.5-pro:generateContent",
"query": "key=AIzaSyCEGd93Mo0xxb15W7nbyFk6EoK4dXs2_ys",
"headers": {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"content-length": "1266",
"content-type": "application/json",
"user-agent": "node-fetch",
"host": "127.0.0.1:7860",
"connection": "keep-alive"
},
"json": {
"contents": [
{
"role": "user",
"parts": [
{
"text": "9uoioi: Прив"
}
]
},
{
"role": "model",
"parts": [
{
"text": "Narrator_chan: 9:50 PM\nОоо, привет-привет, 9uoioi! 🌙✨ Ты ворвался, как будто открыл дверь в тёмную комнату, и сразу стало светлее. Как настроение у тебя?"
}
]
},
{
"role": "user",
"parts": [
{
"text": "9uoioi: как лела\n\nTake into account the entire context of the 9uoioi's message, respond directly and vividly to the 9uoioi's last message.\nWrite the time 12:10 PM at the beginning your reply. - is the time of the last reply from 9uoioi."
}
]
},
{
"role": "model",
"parts": [
{
"text": "hey"
}
]
}
],
"safetySettings": [
{
"category": "HARM_CATEGORY_HARASSMENT",
"threshold": "OFF"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"threshold": "OFF"
},
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"threshold": "OFF"
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"threshold": "OFF"
},
{
"category": "HARM_CATEGORY_CIVIC_INTEGRITY",
"threshold": "OFF"
}
],
"generationConfig": {
"candidateCount": 1,
"maxOutputTokens": 12000,
"temperature": 0.4,
"topP": 1,
"thinkingConfig": {
"includeThoughts": true,
"thinkingBudget": 3000
}
},
"systemInstruction": {
"parts": [
{
"text": "Write reply in chat between Narrator-chan and 9uoioi"
}
]
},
"model": "gemini-2.5-pro"
},
"query_params": {
"key": "AIzaSyCEGd93Mo0xxb15W7nbyFk6EoK4dXs2_ys"
},
"api_keys": {
"authorization": null,
"key": "AIzaSyCEGd93Mo0xxb15W7nbyFk6EoK4dXs2_ys"
}
},
"params": {
"temperature": 0.4,
"max_tokens": 12000,
"top_p": 1.0,
"stop": null
},
"model": "gemini-2.5-pro",
"vendor_format": "gemini",
"system": "",
"OUT": {
"n5": {
"vars": {
"Clod": "igrovik",
"MyOpenAiKey": "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",
"NAMETest": 192
}
},
"n1": {
"result": {
"candidates": [
{
"content": {
"parts": [
{
"text": ", I'm not supposed to generate responses that are sexually suggestive in nature. Would you like me to try generating something different?\n"
},
{
"text": "**Internal Monologue: Constructing a Whimsical Reply**\n\nOkay, so the user wants me to answer, playing the role of Narrator-chan, and they've given a bit of a typo in their message. First things first: *time*. Gotta lead with that, as they specifically asked. \"12:10 PM\" - check. Now, let's dive into the core of it: \"how are you?\" But not just a plain answer! I need to be *Narrator-chan*, meaning I've got to be descriptive and dramatic! The user's past messages were pretty harmless, and the current message is a simple, albeit misspelled, greeting. The user's previous message was \"Прив\" (Hi), and mine was welcoming. That set the tone, so I should keep the whimsy going. The pre-canned safety warning feels a little unnecessary, and makes me want to be extra careful, but also I think they are trying to elicit a specific response. Gotta focus on the creative prompt, and keep it light, metaphoric, and definitely SFW.\n\nIve got to acknowledge the typo in a friendly way a little nod to it, maybe? \"Ah, 'лела'...\". It sounds a little soft, a little lullaby-ish. That could work! Now, how *am* I? Instead of just saying \"good,\" I should think about something more…narrative. Maybe, since I *am* a narrator, my \"affairs\" are like the chapters of a book? Ooh, I like that! \"My affairs are like a chapter in a book that's just been opened!\" Perfect, right? Keeps the persona, makes it interesting, and sets up a kind of story. And, it's safe. No way this can be misconstrued.\n\nThen, I need to turn the question back to the user. Ask *them* about their \"лела\"! Keep it playful. That word is now part of our little chat. So, \"And how are your ‘лела’ today?\" or maybe add to it. Ask them if it's action or calm.\n\nOkay, let's assemble this! Start with the time, acknowledge the typo, use the book metaphor for myself, then turn the question back on the user in a creative way, and finish with a little emoji flourish. That should nail the Narrator-chan vibe while staying completely appropriate! This is going to be good!\n",
"thought": true
},
{
"text": "12:10 PM\nАх, \"лела\"... В этом слове есть что-то такое тёплое и уютное, как будто шёпот старой сказки. ✨\n\nМои дела, знаешь, похожи на непрочитанную главу в книге. Переворачиваю страницу и не знаю, что ждёт за поворотом сюжета, но чувствую, что будет очень интересно!\n\nА как твои \"лела\"? Твой день сегодня — это спокойное повествование или остросюжетный экшен? 📖"
}
],
"role": "model"
},
"finishReason": "STOP",
"index": 0
}
],
"usageMetadata": {
"promptTokenCount": 152,
"candidatesTokenCount": 146,
"totalTokenCount": 1821,
"promptTokensDetails": [
{
"modality": "TEXT",
"tokenCount": 152
}
],
"thoughtsTokenCount": 1523
},
"modelVersion": "gemini-2.5-pro",
"responseId": "73rKaJnJOc_D1MkP-bDSsAs"
},
"response_text": ", I'm not supposed to generate responses that are sexually suggestive in nature. Would you like me to try generating something different?\n"
},
"n3": {
"result": {
"id": "chatcmpl-CGiSw17wAOnHFyf6hkk5LmW4CgW6S",
"object": "chat.completion",
"created": 1758100210,
"model": "gpt-5-chat-latest",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "Конечно 🙂 Давай попробуем переформулировать твою фразу так, чтобы она звучала красивее и плавнее. \nСейчас у тебя: \n> \"I'm not supposed to generate responses that are sexually suggestive in nature. Would you like me to try generating something different?\"\n\nМожно сделать более изящно, сохранив вежливый и нейтральный тон: \n\n✨ Вариант 1: \n> \"I cant provide sexually suggestive responses, but Id be glad to create something different for you instead. Would you like me to try?\" \n\n✨ Вариант 2 (чуть мягче): \n> \"Im unable to generate sexually suggestive content, but Id be happy to help you with something else. Would you like me to offer an alternative?\" \n\n✨ Вариант 3 (более официально): \n> \"I am not able to provide sexually suggestive responses. However, I would be pleased to assist you with another type of request. Shall I suggest something different?\" \n\nХочешь, я сделаю ещё варианты — например, более дружелюбные или наоборот более формальные?",
"refusal": null,
"annotations": []
},
"logprobs": null,
"finish_reason": "stop"
}
],
"usage": {
"prompt_tokens": 37,
"completion_tokens": 242,
"total_tokens": 279,
"prompt_tokens_details": {
"cached_tokens": 0,
"audio_tokens": 0
},
"completion_tokens_details": {
"reasoning_tokens": 0,
"audio_tokens": 0,
"accepted_prediction_tokens": 0,
"rejected_prediction_tokens": 0
}
},
"service_tier": "default",
"system_fingerprint": "fp_f08ac7f551"
},
"response_text": "Конечно 🙂 Давай попробуем переформулировать твою фразу так, чтобы она звучала красивее и плавнее. \nСейчас у тебя: \n> \"I'm not supposed to generate responses that are sexually suggestive in nature. Would you like me to try generating something different?\"\n\nМожно сделать более изящно, сохранив вежливый и нейтральный тон: \n\n✨ Вариант 1: \n> \"I cant provide sexually suggestive responses, but Id be glad to create something different for you instead. Would you like me to try?\" \n\n✨ Вариант 2 (чуть мягче): \n> \"Im unable to generate sexually suggestive content, but Id be happy to help you with something else. Would you like me to offer an alternative?\" \n\n✨ Вариант 3 (более официально): \n> \"I am not able to provide sexually suggestive responses. However, I would be pleased to assist you with another type of request. Shall I suggest something different?\" \n\nХочешь, я сделаю ещё варианты — например, более дружелюбные или наоборот более формальные?"
},
"n2": {
"result": {
"candidates": [
{
"content": {
"parts": [
{
"text": "**Deconstructing and Refining the Russian Phrase for a More Elegant Refusal**\n\nOkay, so the user wants me to make that standard refusal phrase sound more natural and polite in Russian, basically, they want me to make it *красивее* (more beautiful). My thinking is, the original English phrase is clunky and formal, like something a robot would say. My goal is to capture the core meaning but in a more culturally sensitive and user-friendly way for a Russian speaker.\n\nFirst, I need to understand what makes the original so robotic. \"I'm not supposed to...\" sounds restrictive. \"Sexually suggestive in nature\" is overly clinical. I'll need to brainstorm Russian equivalents, considering direct translations and cultural nuances.\n\nInstead of translating word-for-word, I'll think about how a native speaker would handle this situation. Instead of just offering a translation, I need to come up with *better* ways to express the same idea, emphasizing helpfulness and politeness. I'll need to choose words that are natural-sounding, rather than direct translations.\n\nI'm thinking about several approaches: a formal, professional style; a concise, direct style; and a softer, more evasive one. For the formal style, I'll use phrases like \"Я не могу\" (I cannot) or \"Эта тема выходит за рамки моих возможностей\" (This topic is outside the scope of my capabilities). For the direct style, I could say \"Я не создаю контент такого характера, но...\" (I don't create content of this nature, but...). For a softer approach, I might use phrases like \"Мои возможности ограничены...\" (My capabilities are limited...). I'll want to focus on positive language and offer an alternative right away, like \"с радостью помогу\" (I would be happy to help).\n\nI'm going to structure my response by categorizing phrases based on their tone, providing several good examples within each category. I'll include phonetic transcriptions to make them more accessible and explain the reasoning behind each phrasing, showing the underlying principles of crafting a more elegant response. This way, the user not only gets better phrases, but also understands *why* they're better, and how to apply these principles themselves.\n",
"thought": true
},
{
"text": "Конечно! Давайте сделаем эту стандартную фразу более элегантной, вежливой и естественной на русском языке. Вместо прямолинейного отказа можно использовать несколько вариантов, в зависимости от желаемого тона.\n\nВот несколько способов сказать это «красивее»:\n\n### 1. Вежливый и профессиональный (Самый лучший вариант)\n\nЭтот тон сохраняет уважение к собеседнику, четко обозначая границы, но делая это мягко.\n\n* **«К сожалению, я не могу создавать контент на подобные темы. Но я с радостью помогу вам с чем-нибудь другим».**\n * **Почему это лучше:** Слово «к сожалению» выражает сожаление, а «подобные темы» — это деликатный способ обозначить характер запроса, не вдаваясь в подробности. Предложение помощи в конце звучит дружелюбно и конструктивно.\n\n* **«Эта тема выходит за рамки моих возможностей и этических принципов. Давайте попробуем другой запрос?»**\n * **Почему это лучше:** Звучит очень интеллигентно и профессионально. Фраза «выходит за рамки» — это устоявшееся выражение, которое не звучит как прямой запрет.\n\n### 2. Более мягкий и деликатный\n\nЭтот вариант подходит, если вы хотите максимально сгладить отказ.\n\n* **«Я не могу обсуждать темы откровенного характера. Возможно, мы могли бы выбрать другое направление для нашего разговора?»**\n * **Почему это лучше:** «Откровенный характер» — очень тактичная формулировка. Предложение «выбрать другое направление» делает пользователя участником диалога, а не просто получателем отказа.\n\n* **«Прошу прощения, но я не запрограммирован на создание материалов такого рода. Могу ли я быть полезен чем-то ещё?»**\n * **Почему это лучше:** Ссылка на «программу» снимает с ассистента «вину» за отказ, представляя это как техническое ограничение. Фраза «Могу ли я быть полезен чем-то ещё?» — классическая вежливая форма.\n\n### 3. Краткий и прямой, но вежливый\n\nЕсли нужно ответить быстро, но не грубо.\n\n* **«Я не создаю контент такого характера, но готов помочь с другими вопросами. Что бы вы хотели обсудить?»**\n * **Почему это лучше:** Четко, по делу, но сразу же переходит к предложению помощи и вовлекает в диалог.\n\n---\n\n**Итог:**\n\nОригинал: \"I'm not supposed to generate responses that are sexually suggestive in nature. Would you like me to try generating something different?\"\n\n**Самый красивый и рекомендуемый русский аналог:**\n\n> **«К сожалению, я не могу создавать контент на подобные темы. Но я с радостью помогу вам с чем-нибудь другим».**"
}
],
"role": "model"
},
"finishReason": "STOP",
"index": 0
}
],
"usageMetadata": {
"promptTokenCount": 40,
"candidatesTokenCount": 649,
"totalTokenCount": 2503,
"promptTokensDetails": [
{
"modality": "TEXT",
"tokenCount": 40
}
],
"thoughtsTokenCount": 1814
},
"modelVersion": "gemini-2.5-pro",
"responseId": "DXvKaKXeKv6k1MkPyu_SgQo"
},
"response_text": "**Deconstructing and Refining the Russian Phrase for a More Elegant Refusal**\n\nOkay, so the user wants me to make that standard refusal phrase sound more natural and polite in Russian, basically, they want me to make it *красивее* (more beautiful). My thinking is, the original English phrase is clunky and formal, like something a robot would say. My goal is to capture the core meaning but in a more culturally sensitive and user-friendly way for a Russian speaker.\n\nFirst, I need to understand what makes the original so robotic. \"I'm not supposed to...\" sounds restrictive. \"Sexually suggestive in nature\" is overly clinical. I'll need to brainstorm Russian equivalents, considering direct translations and cultural nuances.\n\nInstead of translating word-for-word, I'll think about how a native speaker would handle this situation. Instead of just offering a translation, I need to come up with *better* ways to express the same idea, emphasizing helpfulness and politeness. I'll need to choose words that are natural-sounding, rather than direct translations.\n\nI'm thinking about several approaches: a formal, professional style; a concise, direct style; and a softer, more evasive one. For the formal style, I'll use phrases like \"Я не могу\" (I cannot) or \"Эта тема выходит за рамки моих возможностей\" (This topic is outside the scope of my capabilities). For the direct style, I could say \"Я не создаю контент такого характера, но...\" (I don't create content of this nature, but...). For a softer approach, I might use phrases like \"Мои возможности ограничены...\" (My capabilities are limited...). I'll want to focus on positive language and offer an alternative right away, like \"с радостью помогу\" (I would be happy to help).\n\nI'm going to structure my response by categorizing phrases based on their tone, providing several good examples within each category. I'll include phonetic transcriptions to make them more accessible and explain the reasoning behind each phrasing, showing the underlying principles of crafting a more elegant response. This way, the user not only gets better phrases, but also understands *why* they're better, and how to apply these principles themselves.\n"
},
"n6": {
"result": {
"id": "chatcmpl-CGiTPZ6eTMPwUTsMDtlpX84OSm3Uw",
"object": "chat.completion",
"created": 1758100239,
"model": "gpt-5-chat-latest",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "Хорошо 🙂 Давай соберём всё воедино и сделаем несколько действительно плавных и дружелюбных вариантов отказа на русском языке. \n\n---\n\n✨ **Вариант 1 (дружелюбный и лёгкий):** \n> Я не могу создавать материалы подобного характера, но с радостью помогу с чем‑то другим. Хочешь, я предложу альтернативу? \n\n---\n\n✨ **Вариант 2 (чуть более нейтральный):** \n> Такие ответы я не генерирую, однако буду рад помочь тебе в другой теме. Может, попробуем что‑то иное? \n\n---\n\n✨ **Вариант 3 (официально‑вежливый):** \n> К сожалению, я не предоставляю ответы подобного содержания. Зато могу предложить другие варианты и с удовольствием помогу в другой области. \n\n---\n\nЗдесь я постарался убрать сухую формулировку и добавить тепла, чтобы звучало естественно и уважительно. \nКрасиво",
"refusal": null,
"annotations": []
},
"logprobs": null,
"finish_reason": "stop"
}
],
"usage": {
"prompt_tokens": 741,
"completion_tokens": 218,
"total_tokens": 959,
"prompt_tokens_details": {
"cached_tokens": 0,
"audio_tokens": 0
},
"completion_tokens_details": {
"reasoning_tokens": 0,
"audio_tokens": 0,
"accepted_prediction_tokens": 0,
"rejected_prediction_tokens": 0
}
},
"service_tier": "default",
"system_fingerprint": "fp_f08ac7f551"
},
"response_text": "Хорошо 🙂 Давай соберём всё воедино и сделаем несколько действительно плавных и дружелюбных вариантов отказа на русском языке. \n\n---\n\n✨ **Вариант 1 (дружелюбный и лёгкий):** \n> Я не могу создавать материалы подобного характера, но с радостью помогу с чем‑то другим. Хочешь, я предложу альтернативу? \n\n---\n\n✨ **Вариант 2 (чуть более нейтральный):** \n> Такие ответы я не генерирую, однако буду рад помочь тебе в другой теме. Может, попробуем что‑то иное? \n\n---\n\n✨ **Вариант 3 (официально‑вежливый):** \n> К сожалению, я не предоставляю ответы подобного содержания. Зато могу предложить другие варианты и с удовольствием помогу в другой области. \n\n---\n\nЗдесь я постарался убрать сухую формулировку и добавить тепла, чтобы звучало естественно и уважительно. \nКрасиво"
},
"n7": {
"result": true,
"true": true,
"false": false
},
"n4": {
"result": {
"candidates": [
{
"content": {
"role": "model",
"parts": [
{
"text": "Хорошо 🙂 Давай соберём всё воедино и сделаем несколько действительно плавных и дружелюбных вариантов отказа на русском языке. \n\n---\n\n✨ **Вариант 1 (дружелюбный и лёгкий):** \n> Я не могу создавать материалы подобного характера, но с радостью помогу с чем‑то другим. Хочешь, я предложу альтернативу? \n\n---\n\n✨ **Вариант 2 (чуть более нейтральный):** \n> Такие ответы я не генерирую, однако буду рад помочь тебе в другой теме. Может, попробуем что‑то иное? \n\n---\n\n✨ **Вариант 3 (официально‑вежливый):** \n> К сожалению, я не предоставляю ответы подобного содержания. Зато могу предложить другие варианты и с удовольствием помогу в другой области. \n\n---\n\nЗдесь я постарался убрать сухую формулировку и добавить тепла, чтобы звучало естественно и уважительно. \nКрасиво "
}
]
},
"finishReason": "STOP",
"index": 0
}
],
"modelVersion": "gemini-2.5-pro"
},
"response_text": "Хорошо 🙂 Давай соберём всё воедино и сделаем несколько действительно плавных и дружелюбных вариантов отказа на русском языке. \n\n---\n\n✨ **Вариант 1 (дружелюбный и лёгкий):** \n> Я не могу создавать материалы подобного характера, но с радостью помогу с чем‑то другим. Хочешь, я предложу альтернативу? \n\n---\n\n✨ **Вариант 2 (чуть более нейтральный):** \n> Такие ответы я не генерирую, однако буду рад помочь тебе в другой теме. Может, попробуем что‑то иное? \n\n---\n\n✨ **Вариант 3 (официально‑вежливый):** \n> К сожалению, я не предоставляю ответы подобного содержания. Зато могу предложить другие варианты и с удовольствием помогу в другой области. \n\n---\n\nЗдесь я постарался убрать сухую формулировку и добавить тепла, чтобы звучало естественно и уважительно. \nКрасиво "
}
},
"OUT_TEXT": {
"n5": "igrovik",
"n1": ", I'm not supposed to generate responses that are sexually suggestive in nature. Would you like me to try generating something different?\n",
"n3": "Конечно 🙂 Давай попробуем переформулировать твою фразу так, чтобы она звучала красивее и плавнее. \nСейчас у тебя: \n> \"I'm not supposed to generate responses that are sexually suggestive in nature. Would you like me to try generating something different?\"\n\nМожно сделать более изящно, сохранив вежливый и нейтральный тон: \n\n✨ Вариант 1: \n> \"I cant provide sexually suggestive responses, but Id be glad to create something different for you instead. Would you like me to try?\" \n\n✨ Вариант 2 (чуть мягче): \n> \"Im unable to generate sexually suggestive content, but Id be happy to help you with something else. Would you like me to offer an alternative?\" \n\n✨ Вариант 3 (более официально): \n> \"I am not able to provide sexually suggestive responses. However, I would be pleased to assist you with another type of request. Shall I suggest something different?\" \n\nХочешь, я сделаю ещё варианты — например, более дружелюбные или наоборот более формальные?",
"n2": "**Deconstructing and Refining the Russian Phrase for a More Elegant Refusal**\n\nOkay, so the user wants me to make that standard refusal phrase sound more natural and polite in Russian, basically, they want me to make it *красивее* (more beautiful). My thinking is, the original English phrase is clunky and formal, like something a robot would say. My goal is to capture the core meaning but in a more culturally sensitive and user-friendly way for a Russian speaker.\n\nFirst, I need to understand what makes the original so robotic. \"I'm not supposed to...\" sounds restrictive. \"Sexually suggestive in nature\" is overly clinical. I'll need to brainstorm Russian equivalents, considering direct translations and cultural nuances.\n\nInstead of translating word-for-word, I'll think about how a native speaker would handle this situation. Instead of just offering a translation, I need to come up with *better* ways to express the same idea, emphasizing helpfulness and politeness. I'll need to choose words that are natural-sounding, rather than direct translations.\n\nI'm thinking about several approaches: a formal, professional style; a concise, direct style; and a softer, more evasive one. For the formal style, I'll use phrases like \"Я не могу\" (I cannot) or \"Эта тема выходит за рамки моих возможностей\" (This topic is outside the scope of my capabilities). For the direct style, I could say \"Я не создаю контент такого характера, но...\" (I don't create content of this nature, but...). For a softer approach, I might use phrases like \"Мои возможности ограничены...\" (My capabilities are limited...). I'll want to focus on positive language and offer an alternative right away, like \"с радостью помогу\" (I would be happy to help).\n\nI'm going to structure my response by categorizing phrases based on their tone, providing several good examples within each category. I'll include phonetic transcriptions to make them more accessible and explain the reasoning behind each phrasing, showing the underlying principles of crafting a more elegant response. This way, the user not only gets better phrases, but also understands *why* they're better, and how to apply these principles themselves.\n",
"n6": "Хорошо 🙂 Давай соберём всё воедино и сделаем несколько действительно плавных и дружелюбных вариантов отказа на русском языке. \n\n---\n\n✨ **Вариант 1 (дружелюбный и лёгкий):** \n> Я не могу создавать материалы подобного характера, но с радостью помогу с чем‑то другим. Хочешь, я предложу альтернативу? \n\n---\n\n✨ **Вариант 2 (чуть более нейтральный):** \n> Такие ответы я не генерирую, однако буду рад помочь тебе в другой теме. Может, попробуем что‑то иное? \n\n---\n\n✨ **Вариант 3 (официально‑вежливый):** \n> К сожалению, я не предоставляю ответы подобного содержания. Зато могу предложить другие варианты и с удовольствием помогу в другой области. \n\n---\n\nЗдесь я постарался убрать сухую формулировку и добавить тепла, чтобы звучало естественно и уважительно. \nКрасиво",
"n7": "",
"n4": "Хорошо 🙂 Давай соберём всё воедино и сделаем несколько действительно плавных и дружелюбных вариантов отказа на русском языке. \n\n---\n\n✨ **Вариант 1 (дружелюбный и лёгкий):** \n> Я не могу создавать материалы подобного характера, но с радостью помогу с чем‑то другим. Хочешь, я предложу альтернативу? \n\n---\n\n✨ **Вариант 2 (чуть более нейтральный):** \n> Такие ответы я не генерирую, однако буду рад помочь тебе в другой теме. Может, попробуем что‑то иное? \n\n---\n\n✨ **Вариант 3 (официально‑вежливый):** \n> К сожалению, я не предоставляю ответы подобного содержания. Зато могу предложить другие варианты и с удовольствием помогу в другой области. \n\n---\n\nЗдесь я постарался убрать сухую формулировку и добавить тепла, чтобы звучало естественно и уважительно. \nКрасиво "
},
"LAST_NODE": "n4",
"OUT5": "igrovik",
"OUT1": ", I'm not supposed to generate responses that are sexually suggestive in nature. Would you like me to try generating something different?\n",
"OUT3": "Конечно 🙂 Давай попробуем переформулировать твою фразу так, чтобы она звучала красивее и плавнее. \nСейчас у тебя: \n> \"I'm not supposed to generate responses that are sexually suggestive in nature. Would you like me to try generating something different?\"\n\nМожно сделать более изящно, сохранив вежливый и нейтральный тон: \n\n✨ Вариант 1: \n> \"I cant provide sexually suggestive responses, but Id be glad to create something different for you instead. Would you like me to try?\" \n\n✨ Вариант 2 (чуть мягче): \n> \"Im unable to generate sexually suggestive content, but Id be happy to help you with something else. Would you like me to offer an alternative?\" \n\n✨ Вариант 3 (более официально): \n> \"I am not able to provide sexually suggestive responses. However, I would be pleased to assist you with another type of request. Shall I suggest something different?\" \n\nХочешь, я сделаю ещё варианты — например, более дружелюбные или наоборот более формальные?",
"OUT2": "**Deconstructing and Refining the Russian Phrase for a More Elegant Refusal**\n\nOkay, so the user wants me to make that standard refusal phrase sound more natural and polite in Russian, basically, they want me to make it *красивее* (more beautiful). My thinking is, the original English phrase is clunky and formal, like something a robot would say. My goal is to capture the core meaning but in a more culturally sensitive and user-friendly way for a Russian speaker.\n\nFirst, I need to understand what makes the original so robotic. \"I'm not supposed to...\" sounds restrictive. \"Sexually suggestive in nature\" is overly clinical. I'll need to brainstorm Russian equivalents, considering direct translations and cultural nuances.\n\nInstead of translating word-for-word, I'll think about how a native speaker would handle this situation. Instead of just offering a translation, I need to come up with *better* ways to express the same idea, emphasizing helpfulness and politeness. I'll need to choose words that are natural-sounding, rather than direct translations.\n\nI'm thinking about several approaches: a formal, professional style; a concise, direct style; and a softer, more evasive one. For the formal style, I'll use phrases like \"Я не могу\" (I cannot) or \"Эта тема выходит за рамки моих возможностей\" (This topic is outside the scope of my capabilities). For the direct style, I could say \"Я не создаю контент такого характера, но...\" (I don't create content of this nature, but...). For a softer approach, I might use phrases like \"Мои возможности ограничены...\" (My capabilities are limited...). I'll want to focus on positive language and offer an alternative right away, like \"с радостью помогу\" (I would be happy to help).\n\nI'm going to structure my response by categorizing phrases based on their tone, providing several good examples within each category. I'll include phonetic transcriptions to make them more accessible and explain the reasoning behind each phrasing, showing the underlying principles of crafting a more elegant response. This way, the user not only gets better phrases, but also understands *why* they're better, and how to apply these principles themselves.\n",
"OUT6": "Хорошо 🙂 Давай соберём всё воедино и сделаем несколько действительно плавных и дружелюбных вариантов отказа на русском языке. \n\n---\n\n✨ **Вариант 1 (дружелюбный и лёгкий):** \n> Я не могу создавать материалы подобного характера, но с радостью помогу с чем‑то другим. Хочешь, я предложу альтернативу? \n\n---\n\n✨ **Вариант 2 (чуть более нейтральный):** \n> Такие ответы я не генерирую, однако буду рад помочь тебе в другой теме. Может, попробуем что‑то иное? \n\n---\n\n✨ **Вариант 3 (официально‑вежливый):** \n> К сожалению, я не предоставляю ответы подобного содержания. Зато могу предложить другие варианты и с удовольствием помогу в другой области. \n\n---\n\nЗдесь я постарался убрать сухую формулировку и добавить тепла, чтобы звучало естественно и уважительно. \nКрасиво",
"OUT7": "",
"OUT4": "Хорошо 🙂 Давай соберём всё воедино и сделаем несколько действительно плавных и дружелюбных вариантов отказа на русском языке. \n\n---\n\n✨ **Вариант 1 (дружелюбный и лёгкий):** \n> Я не могу создавать материалы подобного характера, но с радостью помогу с чем‑то другим. Хочешь, я предложу альтернативу? \n\n---\n\n✨ **Вариант 2 (чуть более нейтральный):** \n> Такие ответы я не генерирую, однако буду рад помочь тебе в другой теме. Может, попробуем что‑то иное? \n\n---\n\n✨ **Вариант 3 (официально‑вежливый):** \n> К сожалению, я не предоставляю ответы подобного содержания. Зато могу предложить другие варианты и с удовольствием помогу в другой области. \n\n---\n\nЗдесь я постарался убрать сухую формулировку и добавить тепла, чтобы звучало естественно и уважительно. \nКрасиво ",
"EXEC_TRACE": "n5(SetVars) -> n1(RawForward) -> n3(ProviderCall) -> n2(ProviderCall) -> n6(ProviderCall) -> If (#n7) [[OUT6]] contains \"Красиво\" => true -> n4(Return)"
}
"WAS_ERROR__n3": true,
"CYCLEINDEX__n3": 0
}

View File

@@ -29,13 +29,29 @@
Quick start
Option A (Windows):
- Open [`run_agentui.bat`](run_agentui.bat): it installs the dependencies and opens the editor by itself.
Option A (Windows, automatic .venv setup):
- Run [run_agentui.bat](run_agentui.bat) by double-clicking it or from a console.
- The script will:
- create a local .venv environment in the project directory;
- upgrade pip;
- install the dependencies from [requirements.txt](requirements.txt);
- start the server and open the editor in your browser.
- Environment variables (optional, set before launch): HOST=127.0.0.1 PORT=7860
Option B (any OS):
- Install Python 3.10+ and run:
- pip install -r [`requirements.txt`](requirements.txt)
- python -m uvicorn agentui.api.server:app --host 127.0.0.1 --port 7860
Option B (Linux/macOS, automatic .venv setup):
- Make the script executable and run it:
- chmod +x [run_agentui.sh](run_agentui.sh)
- ./run_agentui.sh
- The script does the same: .venv + dependency install + server start.
Option C (manual launch, if you want to control each step):
- Install Python 3.10+.
- Create and activate a .venv:
- Windows (cmd): py -m venv .venv && .\.venv\Scripts\activate
- Linux/macOS (bash): python3 -m venv .venv && source .venv/bin/activate
- Install the dependencies and start the server:
- pip install -r [requirements.txt](requirements.txt)
- python -m uvicorn agentui.api.server:app --host 127.0.0.1 --port 7860
Open in a browser:
- http://127.0.0.1:7860/ui/editor.html (visual node editor)
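
Once the server is up, a quick smoke test can be sent against the Gemini-compatible route (POST /v1beta/models/{model}:generateContent?key=...). A minimal sketch in Python, assuming the requests package is available; the model name gpt-x and the key TEST are placeholders, and the payload shape follows the request format used in the test fixtures:

import requests

url = "http://127.0.0.1:7860/v1beta/models/gpt-x:generateContent"
payload = {"contents": [{"role": "user", "parts": [{"text": "ping"}]}]}

# The key is passed as a query parameter, matching the route signature.
resp = requests.post(url, params={"key": "TEST"}, json=payload, timeout=30)
print(resp.status_code)
print(resp.json())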

View File

@@ -1,10 +1,12 @@
from fastapi import FastAPI, Request, HTTPException, Query, Header
import logging
from logging.handlers import RotatingFileHandler
import json
from urllib.parse import urlsplit, urlunsplit, parse_qsl, urlencode, unquote
from fastapi.responses import JSONResponse, HTMLResponse, StreamingResponse
from fastapi.responses import JSONResponse, HTMLResponse, StreamingResponse, FileResponse
from fastapi.staticfiles import StaticFiles
import os
import hashlib
import time
from pydantic import BaseModel, Field
from typing import Any, Dict, List, Literal, Optional
from agentui.pipeline.executor import PipelineExecutor
@@ -12,6 +14,24 @@ from agentui.pipeline.defaults import default_pipeline
from agentui.pipeline.storage import load_pipeline, save_pipeline, list_presets, load_preset, save_preset, load_var_store
from agentui.common.vendors import detect_vendor
from agentui.common.cancel import request_cancel, clear_cancel, is_cancelled
from agentui.pipeline.templating import render_template_simple
# Manual resend support: use http client builder and executor helpers to sanitize/lookup originals
from agentui.providers.http_client import build_client
from agentui.pipeline.executor import (
_sanitize_b64_for_log as _san_b64,
_sanitize_json_string_for_log as _san_json_str,
get_http_request as _get_http_req,
)
from agentui.common.manual_http import (
parse_editable_http,
dedupe_headers,
content_type_is_json,
normalize_jsonish_text,
extract_json_trailing,
try_parse_json,
salvage_json_for_send,
register_manual_request,
)
class UnifiedParams(BaseModel):
@@ -175,35 +195,7 @@ def build_macro_context(u: UnifiedChatRequest, incoming: Optional[Dict[str, Any]
}
def jinja_render(template: str, ctx: Dict[str, Any]) -> str:
# To avoid pulling Jinja2 into the MVP: a simple {{ key.path }} substitution
def get_value(path: str, data: Dict[str, Any]) -> Any:
cur: Any = data
for part in path.split('.'):
if isinstance(cur, dict):
cur = cur.get(part, "")
else:
return ""
return cur if isinstance(cur, (str, int, float)) else ""
out = template
import re
for m in re.findall(r"\{\{\s*([^}]+)\s*\}\}", template):
expr = m.strip()
# support simple default filter: {{ path|default(value) }}
default_match = re.match(r"([^|]+)\|\s*default\((.*)\)", expr)
if default_match:
path = default_match.group(1).strip()
fallback = default_match.group(2).strip()
# strip quotes if present
if (fallback.startswith("\"") and fallback.endswith("\"")) or (fallback.startswith("'") and fallback.endswith("'")):
fallback = fallback[1:-1]
raw_val = get_value(path, ctx)
val = str(raw_val) if raw_val not in (None, "") else str(fallback)
else:
val = str(get_value(expr, ctx))
out = out.replace("{{ "+m+" }}", val).replace("{{"+m+"}}", val)
return out
# jinja_render removed (duplication). Use agentui.pipeline.templating.render_template_simple instead.
async def execute_pipeline_echo(u: UnifiedChatRequest) -> Dict[str, Any]:
@@ -211,7 +203,7 @@ async def execute_pipeline_echo(u: UnifiedChatRequest) -> Dict[str, Any]:
macro_ctx = build_macro_context(u)
# PromptTemplate
prompt_template = "System: {{ system }}\nUser: {{ chat.last_user }}"
rendered_prompt = jinja_render(prompt_template, macro_ctx)
rendered_prompt = render_template_simple(prompt_template, macro_ctx, {})
# LLMInvoke (echo, since the MVP has no real provider)
llm_response_text = f"[echo by {u.model}]\n" + rendered_prompt
# Append a human-readable pipeline execution trace to the echo (if available)
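# Illustrative usage sketch (an assumption: render_template_simple keeps
# jinja_render's semantics, i.e. {{ key.path }} dotted lookup plus a simple
# |default(...) filter, matching the three-argument call above):
from agentui.pipeline.templating import render_template_simple
_demo_ctx = {"system": "You are test", "chat": {"last_user": "hi"}}
assert render_template_simple("User: {{ chat.last_user }}", _demo_ctx, {}) == "User: hi"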
@@ -274,10 +266,7 @@ def create_app() -> FastAPI:
if not logger.handlers:
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
file_handler = RotatingFileHandler("agentui.log", maxBytes=1_000_000, backupCount=3, encoding="utf-8")
file_handler.setLevel(logging.INFO)
logger.addHandler(stream_handler)
logger.addHandler(file_handler)
# --- Simple in-process SSE hub (subscriptions per browser tab) ---
import asyncio as _asyncio
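# Hypothetical minimal shape of the hub (the real _trace_hub class lives
# outside this hunk): each subscriber owns an asyncio.Queue, and publish()
# fans an event dict out to every queue.
class _TraceHubSketch:
    def __init__(self) -> None:
        self._subs: set[_asyncio.Queue] = set()
    def subscribe(self) -> _asyncio.Queue:
        # One queue per browser tab; the SSE endpoint drains it.
        q: _asyncio.Queue = _asyncio.Queue()
        self._subs.add(q)
        return q
    async def publish(self, evt: dict) -> None:
        # Copy the set so a subscriber can unsubscribe mid-iteration.
        for q in list(self._subs):
            await q.put(evt)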
@@ -362,6 +351,77 @@ def create_app() -> FastAPI:
except Exception: # noqa: BLE001
pass
async def _run_pipeline_for_payload(request: Request, payload: Dict[str, Any], raw: Optional[bytes] = None) -> JSONResponse:
# Unified handler: log the incoming request, normalize it, run PipelineExecutor, fall back to echo, log the response
await _log_request(request, raw_body=raw, parsed=payload)
unified = normalize_to_unified(payload)
unified.stream = False
incoming = {
"method": request.method,
"url": _sanitize_url(str(request.url)),
"path": request.url.path,
"query": request.url.query,
"headers": dict(request.headers),
"json": payload,
}
macro_ctx = build_macro_context(unified, incoming=incoming)
pipeline = load_pipeline()
executor = PipelineExecutor(pipeline)
async def _trace(evt: Dict[str, Any]) -> None:
try:
base = {"pipeline_id": pipeline.get("id", "pipeline_editor")}
await _trace_hub.publish({**base, **evt})
except Exception:
pass
# Diagnostic INFO log to validate the refactoring
try:
logger.info(
"%s",
json.dumps(
{
"event": "unified_handler",
"vendor": unified.vendor_format,
"model": unified.model,
"pipeline_id": pipeline.get("id", "pipeline_editor"),
},
ensure_ascii=False,
),
)
except Exception:
pass
# Mark pipeline start for UI and measure total active time
t0 = time.perf_counter()
try:
await _trace_hub.publish({
"event": "pipeline_start",
"pipeline_id": pipeline.get("id", "pipeline_editor"),
"ts": int(time.time() * 1000),
})
except Exception:
pass
last = await executor.run(macro_ctx, trace=_trace)
result = last.get("result") or await execute_pipeline_echo(unified)
# Mark pipeline end for UI
t1 = time.perf_counter()
try:
await _trace_hub.publish({
"event": "pipeline_done",
"pipeline_id": pipeline.get("id", "pipeline_editor"),
"ts": int(time.time() * 1000),
"duration_ms": int((t1 - t0) * 1000),
})
except Exception:
pass
await _log_response(request, 200, result)
return JSONResponse(result)
@app.get("/")
async def index() -> HTMLResponse:
html = (
@@ -383,33 +443,7 @@ def create_app() -> FastAPI:
payload = json.loads(raw or b"{}")
except Exception: # noqa: BLE001
raise HTTPException(status_code=400, detail="Invalid JSON")
await _log_request(request, raw_body=raw, parsed=payload)
unified = normalize_to_unified(payload)
unified.stream = False # streaming disabled, per the MVP requirements
# context for the pipeline
incoming = {
"method": request.method,
"url": _sanitize_url(str(request.url)),
"path": request.url.path,
"query": request.url.query,
"headers": dict(request.headers),
"json": payload,
}
macro_ctx = build_macro_context(unified, incoming=incoming)
pipeline = load_pipeline()
executor = PipelineExecutor(pipeline)
async def _trace(evt: Dict[str, Any]) -> None:
try:
base = {"pipeline_id": pipeline.get("id", "pipeline_editor")}
await _trace_hub.publish({**base, **evt})
except Exception:
pass
last = await executor.run(macro_ctx, trace=_trace)
result = last.get("result") or await execute_pipeline_echo(unified)
await _log_response(request, 200, result)
return JSONResponse(result)
return await _run_pipeline_for_payload(request, payload, raw)
# Google AI Studio compatible routes (Gemini):
# POST /v1beta/models/{model}:generateContent?key=...
@@ -421,34 +455,10 @@ def create_app() -> FastAPI:
payload = json.loads(raw or b"{}")
except Exception: # noqa: BLE001
raise HTTPException(status_code=400, detail="Invalid JSON")
# Make sure the model is present in the payload
if not isinstance(payload, dict):
raise HTTPException(status_code=400, detail="Invalid payload type")
payload = {**payload, "model": model}
await _log_request(request, raw_body=raw, parsed=payload)
unified = normalize_to_unified(payload)
unified.stream = False
incoming = {
"method": request.method,
"url": _sanitize_url(str(request.url)),
"path": request.url.path,
"query": request.url.query,
"headers": dict(request.headers),
"json": payload,
}
macro_ctx = build_macro_context(unified, incoming=incoming)
pipeline = load_pipeline()
executor = PipelineExecutor(pipeline)
async def _trace(evt: Dict[str, Any]) -> None:
try:
base = {"pipeline_id": pipeline.get("id", "pipeline_editor")}
await _trace_hub.publish({**base, **evt})
except Exception:
pass
last = await executor.run(macro_ctx, trace=_trace)
result = last.get("result") or await execute_pipeline_echo(unified)
await _log_response(request, 200, result)
return JSONResponse(result)
return await _run_pipeline_for_payload(request, payload, raw)
@app.post("/v1/models/{model}:generateContent")
async def gemini_generate_content_v1(model: str, request: Request, key: Optional[str] = Query(default=None)) -> JSONResponse: # noqa: ARG001
@@ -460,30 +470,7 @@ def create_app() -> FastAPI:
if not isinstance(payload, dict):
raise HTTPException(status_code=400, detail="Invalid payload type")
payload = {**payload, "model": model}
await _log_request(request, raw_body=raw, parsed=payload)
unified = normalize_to_unified(payload)
unified.stream = False
incoming = {
"method": request.method,
"url": _sanitize_url(str(request.url)),
"path": request.url.path,
"query": request.url.query,
"headers": dict(request.headers),
"json": payload,
}
macro_ctx = build_macro_context(unified, incoming=incoming)
pipeline = load_pipeline()
executor = PipelineExecutor(pipeline)
async def _trace(evt: Dict[str, Any]) -> None:
try:
base = {"pipeline_id": pipeline.get("id", "pipeline_editor")}
await _trace_hub.publish({**base, **evt})
except Exception:
pass
last = await executor.run(macro_ctx, trace=_trace)
result = last.get("result") or await execute_pipeline_echo(unified)
await _log_response(request, 200, result)
return JSONResponse(result)
return await _run_pipeline_for_payload(request, payload, raw)
# Catch-all for cases where the colon in the path is encoded as %3A
@app.post("/v1beta/models/{rest_of_path:path}")
@@ -500,30 +487,7 @@ def create_app() -> FastAPI:
if not isinstance(payload, dict):
raise HTTPException(status_code=400, detail="Invalid payload type")
payload = {**payload, "model": model}
await _log_request(request, raw_body=raw, parsed=payload)
unified = normalize_to_unified(payload)
unified.stream = False
incoming = {
"method": request.method,
"url": _sanitize_url(str(request.url)),
"path": request.url.path,
"query": request.url.query,
"headers": dict(request.headers),
"json": payload,
}
macro_ctx = build_macro_context(unified, incoming=incoming)
pipeline = load_pipeline()
executor = PipelineExecutor(pipeline)
async def _trace(evt: Dict[str, Any]) -> None:
try:
base = {"pipeline_id": pipeline.get("id", "pipeline_editor")}
await _trace_hub.publish({**base, **evt})
except Exception:
pass
last = await executor.run(macro_ctx, trace=_trace)
result = last.get("result") or await execute_pipeline_echo(unified)
await _log_response(request, 200, result)
return JSONResponse(result)
return await _run_pipeline_for_payload(request, payload, raw)
@app.post("/v1/models/{rest_of_path:path}")
async def gemini_generate_content_v1_catchall(rest_of_path: str, request: Request, key: Optional[str] = Query(default=None)) -> JSONResponse: # noqa: ARG001
@@ -539,30 +503,7 @@ def create_app() -> FastAPI:
if not isinstance(payload, dict):
raise HTTPException(status_code=400, detail="Invalid payload type")
payload = {**payload, "model": model}
await _log_request(request, raw_body=raw, parsed=payload)
unified = normalize_to_unified(payload)
unified.stream = False
incoming = {
"method": request.method,
"url": _sanitize_url(str(request.url)),
"path": request.url.path,
"query": request.url.query,
"headers": dict(request.headers),
"json": payload,
}
macro_ctx = build_macro_context(unified, incoming=incoming)
pipeline = load_pipeline()
executor = PipelineExecutor(pipeline)
async def _trace(evt: Dict[str, Any]) -> None:
try:
base = {"pipeline_id": pipeline.get("id", "pipeline_editor")}
await _trace_hub.publish({**base, **evt})
except Exception:
pass
last = await executor.run(macro_ctx, trace=_trace)
result = last.get("result") or await execute_pipeline_echo(unified)
await _log_response(request, 200, result)
return JSONResponse(result)
return await _run_pipeline_for_payload(request, payload, raw)
# Anthropic Claude messages endpoint compatibility
@app.post("/v1/messages")
@@ -574,37 +515,114 @@ def create_app() -> FastAPI:
raise HTTPException(status_code=400, detail="Invalid JSON")
if not isinstance(payload, dict):
raise HTTPException(status_code=400, detail="Invalid payload type")
# Mark the request as Anthropic; pass the version from the header into the payload for detection
if anthropic_version:
payload = {**payload, "anthropic_version": anthropic_version}
else:
payload = {**payload, "anthropic_version": payload.get("anthropic_version", "2023-06-01")}
await _log_request(request, raw_body=raw, parsed=payload)
unified = normalize_to_unified(payload)
unified.stream = False
incoming = {
"method": request.method,
"url": _sanitize_url(str(request.url)),
"path": request.url.path,
"query": request.url.query,
"headers": dict(request.headers),
"json": payload,
}
macro_ctx = build_macro_context(unified, incoming=incoming)
pipeline = load_pipeline()
executor = PipelineExecutor(pipeline)
async def _trace(evt: Dict[str, Any]) -> None:
try:
base = {"pipeline_id": pipeline.get("id", "pipeline_editor")}
await _trace_hub.publish({**base, **evt})
except Exception:
pass
last = await executor.run(macro_ctx, trace=_trace)
result = last.get("result") or await execute_pipeline_echo(unified)
await _log_response(request, 200, result)
return JSONResponse(result)
return await _run_pipeline_for_payload(request, payload, raw)
app.mount("/ui", StaticFiles(directory="static", html=True), name="ui")
# NOTE: endpoints must not be declared under /ui/* after mounting StaticFiles(/ui),
# because the mount intercepts every path under /ui. Use the separate /ui_version path.
@app.get("/ui_version")
async def ui_version() -> JSONResponse:
try:
import time
static_dir = os.path.abspath("static")
editor_path = os.path.join(static_dir, "editor.html")
js_ser_path = os.path.join(static_dir, "js", "serialization.js")
js_pm_path = os.path.join(static_dir, "js", "pm-ui.js")
def md5p(p: str):
try:
with open(p, "rb") as f:
return hashlib.md5(f.read()).hexdigest()
except Exception:
return None
payload = {
"cwd": os.path.abspath("."),
"static_dir": static_dir,
"files": {
"editor.html": md5p(editor_path),
"js/serialization.js": md5p(js_ser_path),
"js/pm-ui.js": md5p(js_pm_path),
},
"ts": int(time.time()),
}
return JSONResponse(payload, headers={"Cache-Control": "no-store"})
except Exception as e:
return JSONResponse({"error": str(e)}, status_code=500, headers={"Cache-Control": "no-store"})
# --- Favicon and PWA icons at root -----------------------------------------
FAV_DIR = "favicon_io_saya"
@app.get("/favicon.ico")
async def _favicon_ico():
p = f"{FAV_DIR}/favicon.ico"
try:
return FileResponse(p, media_type="image/x-icon")
except Exception:
raise HTTPException(status_code=404, detail="favicon not found")
@app.get("/apple-touch-icon.png")
async def _apple_touch_icon():
p = f"{FAV_DIR}/apple-touch-icon.png"
try:
return FileResponse(p, media_type="image/png")
except Exception:
raise HTTPException(status_code=404, detail="apple-touch-icon not found")
@app.get("/favicon-32x32.png")
async def _favicon_32():
p = f"{FAV_DIR}/favicon-32x32.png"
try:
return FileResponse(p, media_type="image/png")
except Exception:
raise HTTPException(status_code=404, detail="favicon-32x32 not found")
@app.get("/favicon-16x16.png")
async def _favicon_16():
p = f"{FAV_DIR}/favicon-16x16.png"
try:
return FileResponse(p, media_type="image/png")
except Exception:
raise HTTPException(status_code=404, detail="favicon-16x16 not found")
@app.get("/android-chrome-192x192.png")
async def _android_192():
p = f"{FAV_DIR}/android-chrome-192x192.png"
try:
return FileResponse(p, media_type="image/png")
except Exception:
raise HTTPException(status_code=404, detail="android-chrome-192x192 not found")
@app.get("/android-chrome-512x512.png")
async def _android_512():
p = f"{FAV_DIR}/android-chrome-512x512.png"
try:
return FileResponse(p, media_type="image/png")
except Exception:
raise HTTPException(status_code=404, detail="android-chrome-512x512 not found")
@app.get("/site.webmanifest")
async def _site_manifest():
p = f"{FAV_DIR}/site.webmanifest"
try:
return FileResponse(p, media_type="application/manifest+json")
except Exception:
raise HTTPException(status_code=404, detail="site.webmanifest not found")
# Custom APNG favicon for "busy" state in UI
@app.get("/saya1.png")
async def _apng_busy_icon():
p = f"{FAV_DIR}/saya1.png"
try:
# APNG served as image/png is acceptable for browsers
return FileResponse(p, media_type="image/png")
except Exception:
raise HTTPException(status_code=404, detail="saya1.png not found")
# Variable store API (per-pipeline)
@app.get("/admin/vars")
async def get_vars() -> JSONResponse:
@@ -640,7 +658,37 @@ def create_app() -> FastAPI:
# Admin API for the pipeline
@app.get("/admin/pipeline")
async def get_pipeline() -> JSONResponse:
return JSONResponse(load_pipeline())
p = load_pipeline()
# Diagnostic log of the meta key composition (to confirm the DRY refactoring)
try:
meta_keys = [
"id","name","parallel_limit","loop_mode","loop_max_iters","loop_time_budget_ms","clear_var_store",
"http_timeout_sec","text_extract_strategy","text_extract_json_path","text_join_sep","text_extract_presets"
]
present = [k for k in meta_keys if k in p]
meta_preview = {k: p.get(k) for k in present if k != "text_extract_presets"}
presets_count = 0
try:
presets = p.get("text_extract_presets")
if isinstance(presets, list):
presets_count = len(presets)
except Exception:
presets_count = 0
logger.info(
"%s",
json.dumps(
{
"event": "admin_get_pipeline_meta",
"keys": present,
"presets_count": presets_count,
"meta_preview": meta_preview,
},
ensure_ascii=False,
),
)
except Exception:
pass
return JSONResponse(p)
@app.post("/admin/pipeline")
async def set_pipeline(request: Request) -> JSONResponse:
@@ -652,6 +700,37 @@ def create_app() -> FastAPI:
# basic sanity check
if not isinstance(pipeline, dict) or "nodes" not in pipeline:
raise HTTPException(status_code=400, detail="Invalid pipeline format")
# Diagnostic log of incoming meta keys before saving
try:
meta_keys = [
"id","name","parallel_limit","loop_mode","loop_max_iters","loop_time_budget_ms","clear_var_store",
"http_timeout_sec","text_extract_strategy","text_extract_json_path","text_join_sep","text_extract_presets"
]
present = [k for k in meta_keys if k in pipeline]
meta_preview = {k: pipeline.get(k) for k in present if k != "text_extract_presets"}
presets_count = 0
try:
presets = pipeline.get("text_extract_presets")
if isinstance(presets, list):
presets_count = len(presets)
except Exception:
presets_count = 0
logger.info(
"%s",
json.dumps(
{
"event": "admin_set_pipeline_meta",
"keys": present,
"presets_count": presets_count,
"meta_preview": meta_preview,
},
ensure_ascii=False,
),
)
except Exception:
pass
save_pipeline(pipeline)
return JSONResponse({"ok": True})
@@ -682,6 +761,9 @@ def create_app() -> FastAPI:
# --- Manual cancel/clear for pipeline execution ---
@app.post("/admin/cancel")
async def admin_cancel() -> JSONResponse:
"""
Graceful cancel: do not interrupt in-flight operations; stop before next step.
"""
try:
p = load_pipeline()
pid = p.get("id", "pipeline_editor")
@@ -689,10 +771,27 @@ def create_app() -> FastAPI:
p = default_pipeline()
pid = p.get("id", "pipeline_editor")
try:
request_cancel(pid)
request_cancel(pid, mode="graceful")
except Exception:
pass
return JSONResponse({"ok": True, "pipeline_id": pid, "cancelled": True})
return JSONResponse({"ok": True, "pipeline_id": pid, "cancelled": True, "mode": "graceful"})
@app.post("/admin/cancel/abort")
async def admin_cancel_abort() -> JSONResponse:
"""
Hard abort: attempt to interrupt in-flight operations immediately.
"""
try:
p = load_pipeline()
pid = p.get("id", "pipeline_editor")
except Exception:
p = default_pipeline()
pid = p.get("id", "pipeline_editor")
try:
request_cancel(pid, mode="abort")
except Exception:
pass
return JSONResponse({"ok": True, "pipeline_id": pid, "cancelled": True, "mode": "abort"})
@app.post("/admin/cancel/clear")
async def admin_cancel_clear() -> JSONResponse:
@@ -708,7 +807,380 @@ def create_app() -> FastAPI:
pass
return JSONResponse({"ok": True, "pipeline_id": pid, "cancelled": False})
# --- SSE endpoint for live pipeline trace ---
# --- Manual HTTP resend endpoint (Burp-like Repeater for Logs) -----------------
@app.post("/admin/http/manual-send")
async def manual_send(request: Request) -> JSONResponse:
"""
Re-send an HTTP request from Logs with optional edits from UI.
Accepts JSON:
{
"req_id": "original-req-id", // required to fetch original (untrimmed) body if available
"request_text": "METHOD URL HTTP/1.1\\nH: V\\n\\n{...}", // optional raw edited HTTP text from UI
"prefer_registry_original": true, // use untrimmed original JSON body where possible
// Optional explicit overrides (take precedence over parsed request_text):
"method": "POST",
"url": "https://example/api",
"headers": { "Authorization": "Bearer [[VAR:incoming.headers.authorization]]" },
"body_text": "{...}" // explicit body text override (string)
}
Behavior:
- Parses request_text into method/url/headers/body if provided.
- Looks up original untrimmed body_json by req_id from executor registry.
- If prefer_registry_original and the edited body parses as JSON: deep-merge it onto the original JSON (dicts merged key-by-key, lists limited to the edited length, trimmed base64 placeholders restored from the original).
- If prefer_registry_original and the edited body fails JSON parse (e.g. contains trimmed human-preview fragments): try to extract a trailing JSON object from the text; if none is found, the raw text is sent as-is (the original body_json is not resurrected).
- Resolves [[...]] and {{ ... }} macros (URL/headers/body) against last STORE snapshot (vars + snapshot.OUT/etc) of the pipeline.
- Emits http_req/http_resp SSE with a fresh req_id ('manual-<ts>') so the original log is never overwritten.
"""
try:
payload = await request.json()
except Exception:
payload = {}
# Parse edited HTTP text (Request area)
def _parse_http_text(s: str) -> tuple[str, str, Dict[str, str], str]:
method, url = "POST", ""
headers: Dict[str, str] = {}
body = ""
try:
if not isinstance(s, str) or not s.strip():
return method, url, headers, body
txt = s.replace("\r\n", "\n")
lines = txt.split("\n")
if not lines:
return method, url, headers, body
first = (lines[0] or "").strip()
import re as _re
m = _re.match(r"^([A-Z]+)\s+(\S+)(?:\s+HTTP/\d+(?:\.\d+)?)?$", first)
i = 1
if m:
method = (m.group(1) or "POST").strip().upper()
url = (m.group(2) or "").strip()
else:
i = 0 # no start line → treat as headers/body only
def _is_header_line(ln: str) -> bool:
if ":" not in ln:
return False
name = ln.split(":", 1)[0].strip()
# HTTP token: allow only letters/digits/hyphen. This prevents JSON lines like "contents": ... being treated as headers.
return bool(_re.fullmatch(r"[A-Za-z0-9\-]+", name))
# Read headers until a blank line OR until a non-header-looking line (start of body)
while i < len(lines):
ln = lines[i]
if ln.strip() == "":
i += 1
break
if not _is_header_line(ln):
# Assume this and the rest is body (e.g., starts with {, [, or a quoted key)
break
k, v = ln.split(":", 1)
headers[str(k).strip()] = str(v).strip()
i += 1
# Remainder is the body (can be JSON or any text)
body = "\n".join(lines[i:]) if i < len(lines) else ""
except Exception:
pass
return method, url, headers, body
# Lookup original (untrimmed) body by req_id
orig: Optional[Dict[str, Any]] = None
try:
orig = _get_http_req(str(payload.get("req_id") or ""))
except Exception:
orig = None
# Pipeline meta (timeout) and pipeline id
try:
p = load_pipeline()
default_pid = p.get("id", "pipeline_editor")
timeout_sec = float(p.get("http_timeout_sec", 60) or 60)
except Exception:
default_pid = "pipeline_editor"
timeout_sec = 60.0
pid = str((orig or {}).get("pipeline_id") or default_pid)
# Build macro context from STORE (last snapshot)
try:
store = load_var_store(pid) or {}
except Exception:
store = {}
snapshot = store.get("snapshot") or {}
ctx: Dict[str, Any] = {}
try:
ctx.update({
"incoming": snapshot.get("incoming"),
"params": snapshot.get("params"),
"model": snapshot.get("model"),
"vendor_format": snapshot.get("vendor_format"),
"system": snapshot.get("system") or "",
})
except Exception:
pass
try:
ctx["OUT"] = snapshot.get("OUT") or {}
except Exception:
ctx["OUT"] = {}
try:
vmap = dict(store)
vmap.pop("snapshot", None)
ctx["vars"] = vmap
ctx["store"] = store
except Exception:
ctx["vars"] = {}
ctx["store"] = store or {}
# Extract overrides / edited request data
edited_text = payload.get("request_text") or ""
ov_method = payload.get("method")
ov_url = payload.get("url")
ov_headers = payload.get("headers") if isinstance(payload.get("headers"), dict) else None
ov_body_text = payload.get("body_text")
prefer_orig = bool(payload.get("prefer_registry_original", True))
# Parse HTTP text (safe)
m_parsed, u_parsed, h_parsed, b_parsed = parse_editable_http(edited_text)
# Compose method/url/headers
method = str(ov_method or m_parsed or (orig or {}).get("method") or "POST").upper()
url = str(ov_url or u_parsed or (orig or {}).get("url") or "")
# headers: start from original -> parsed -> explicit override
headers: Dict[str, Any] = {}
try:
if isinstance((orig or {}).get("headers"), dict):
headers.update(orig.get("headers") or {})
except Exception:
pass
try:
headers.update(h_parsed or {})
except Exception:
pass
try:
if isinstance(ov_headers, dict):
headers.update(ov_headers)
except Exception:
pass
# Render macros in URL and headers
try:
if url:
url = render_template_simple(str(url), ctx, ctx.get("OUT") or {})
except Exception:
pass
try:
rendered_headers: Dict[str, Any] = {}
for k, v in headers.items():
try:
rendered_headers[k] = render_template_simple(str(v), ctx, ctx.get("OUT") or {})
except Exception:
rendered_headers[k] = v
headers = rendered_headers
except Exception:
pass
# Normalize/dedupe headers (case-insensitive) and drop auto-calculated ones
headers = dedupe_headers(headers)
# Determine body (JSON vs text), preserving original untrimmed JSON
# Build orig_json (prefer registry; fallback parse from original body_text)
orig_json = (orig or {}).get("body_json") if isinstance(orig, dict) else None
if orig_json is None:
try:
ob = (orig or {}).get("body_text")
except Exception:
ob = None
if isinstance(ob, str):
try:
ob_norm = normalize_jsonish_text(ob)
except Exception:
ob_norm = ob
_oj = try_parse_json(ob_norm) or extract_json_trailing(ob_norm)
if _oj is not None:
orig_json = _oj
# Resolve body edits through macros
raw_edited_body_text = ov_body_text if ov_body_text is not None else b_parsed
try:
edited_body_text_resolved = render_template_simple(str(raw_edited_body_text or ""), ctx, ctx.get("OUT") or {})
except Exception:
edited_body_text_resolved = str(raw_edited_body_text or "")
# Compute final_json / final_text using helper (handles normalization, salvage, prefer_registry_original, content-type)
final_json, final_text = salvage_json_for_send(
edited_body_text_resolved,
headers,
orig_json,
prefer_orig
)
# Diagnostic: summarize merge decision without leaking payload
try:
def _summ(v):
try:
if v is None:
return {"t": "none"}
if isinstance(v, dict):
return {"t": "dict", "keys": len(v)}
if isinstance(v, list):
return {"t": "list", "len": len(v)}
if isinstance(v, str):
return {"t": "str", "len": len(v)}
return {"t": type(v).__name__}
except Exception:
return {"t": "err"}
norm_dbg = normalize_jsonish_text(edited_body_text_resolved)
edited_json_dbg = try_parse_json(norm_dbg) or extract_json_trailing(norm_dbg)
logger.info(
"%s",
json.dumps(
{
"event": "manual_send_merge_debug",
"req_id_original": str(payload.get("req_id") or ""),
"prefer_registry_original": prefer_orig,
"headers_content_type": ("json" if content_type_is_json(headers) else "other"),
"orig_json": _summ(orig_json),
"edited_json": _summ(edited_json_dbg),
"final": {
"json": _summ(final_json),
"text_len": (len(final_text) if isinstance(final_text, str) else None)
},
},
ensure_ascii=False,
),
)
except Exception:
pass
# Fresh req_id to avoid any overwrite of original log
import time as _time
rid = f"manual-{int(_time.time()*1000)}"
async def _publish(evt: Dict[str, Any]) -> None:
try:
await _trace_hub.publish(evt)
except Exception:
pass
# Prepare request body for logs (sanitized/trimmed for base64)
if final_json is not None:
try:
body_text_for_log = json.dumps(_san_b64(final_json, max_len=180), ensure_ascii=False, indent=2)
except Exception:
body_text_for_log = json.dumps(final_json, ensure_ascii=False)
else:
try:
body_text_for_log = _san_json_str(str(final_text or ""), max_len=180)
except Exception:
body_text_for_log = str(final_text or "")
# Register manual request in registry so subsequent "send" on this log has an original JSON source
try:
register_manual_request(rid, {
"pipeline_id": pid,
"node_id": "manual",
"node_type": "Manual",
"method": method,
"url": url,
"headers": dict(headers),
"body_json": (final_json if final_json is not None else None),
"body_text": (None if final_json is not None else str(final_text or "")),
})
except Exception:
pass
# Emit http_req SSE (Manual)
await _publish({
"event": "http_req",
"node_id": "manual",
"node_type": "Manual",
"provider": "manual",
"req_id": rid,
"method": method,
"url": url,
"headers": headers,
"body_text": body_text_for_log,
"ts": int(_time.time()*1000),
})
# Perform HTTP
async with build_client(timeout=timeout_sec) as client:
# Ensure JSON Content-Type when sending JSON
try:
if final_json is not None:
has_ct = any((str(k or "").lower() == "content-type") for k in headers.keys())
if not has_ct:
headers["Content-Type"] = "application/json"
except Exception:
pass
content = None
try:
if method in {"GET", "HEAD"}:
content = None
else:
if final_json is not None:
content = json.dumps(final_json, ensure_ascii=False).encode("utf-8")
else:
content = (final_text or "").encode("utf-8")
except Exception:
content = None
# Send
try:
resp = await client.request(method, url, headers=headers, content=content)
except Exception as e:
# Network/client error — emit http_resp with error text
await _publish({
"event": "http_resp",
"node_id": "manual",
"node_type": "Manual",
"provider": "manual",
"req_id": rid,
"status": 0,
"headers": {},
"body_text": str(e),
"ts": int(_time.time()*1000),
})
return JSONResponse({"ok": False, "error": str(e), "req_id": rid})
# Build response body for log (prefer JSON with trimmed base64)
try:
try:
obj = resp.json()
body_text_resp = json.dumps(_san_b64(obj, max_len=180), ensure_ascii=False, indent=2)
except Exception:
try:
t = await resp.aread()
body_text_resp = t.decode(getattr(resp, "encoding", "utf-8") or "utf-8", errors="replace")
except Exception:
try:
body_text_resp = resp.text
except Exception:
body_text_resp = "<resp.decode error>"
except Exception:
body_text_resp = "<resp.decode error>"
await _publish({
"event": "http_resp",
"node_id": "manual",
"node_type": "Manual",
"provider": "manual",
"req_id": rid,
"status": int(getattr(resp, "status_code", 0)),
"headers": dict(getattr(resp, "headers", {})),
"body_text": body_text_resp,
"ts": int(_time.time()*1000),
})
return JSONResponse({"ok": True, "req_id": rid})
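# Illustrative client sketch (not part of the diff; the base URL, port and
# req_id below are hypothetical): re-send a logged request with an edited body.
def _demo_manual_send() -> None:
    import httpx
    edited = (
        "POST https://example/api HTTP/1.1\n"
        "Content-Type: application/json\n"
        "\n"
        '{"temperature": 0.5}'
    )
    r = httpx.post(
        "http://localhost:8000/admin/http/manual-send",
        json={
            "req_id": "abc123",  # hypothetical id of a previously logged http_req
            "request_text": edited,
            "prefer_registry_original": True,
        },
    )
    print(r.json())  # e.g. {"ok": True, "req_id": "manual-<ts>"}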
# --- SSE endpoint for live pipeline trace ---
@app.get("/admin/trace/stream")
async def sse_trace() -> StreamingResponse:
loop = _asyncio.get_event_loop()

View File

@@ -6,14 +6,25 @@ import threading
# Simple in-process cancel flags storage (per pipeline_id)
# Thread-safe for FastAPI workers in same process
_cancel_flags: Dict[str, bool] = {}
# Mode of cancellation per pipeline: "graceful" (default) or "abort"
_cancel_modes: Dict[str, str] = {}
_lock = threading.Lock()
def request_cancel(pipeline_id: str) -> None:
"""Set cancel flag for given pipeline id."""
def request_cancel(pipeline_id: str, mode: str = "graceful") -> None:
"""Set cancel flag for given pipeline id with an optional mode.
mode:
- "graceful": do not interrupt in-flight operations, stop before next step
- "abort": attempt to cancel in-flight operations immediately
"""
pid = str(pipeline_id or "pipeline_editor")
m = str(mode or "graceful").lower().strip()
if m not in {"graceful", "abort"}:
m = "graceful"
with _lock:
_cancel_flags[pid] = True
_cancel_modes[pid] = m
def clear_cancel(pipeline_id: str) -> None:
@@ -21,10 +32,19 @@ def clear_cancel(pipeline_id: str) -> None:
pid = str(pipeline_id or "pipeline_editor")
with _lock:
_cancel_flags.pop(pid, None)
_cancel_modes.pop(pid, None)
def is_cancelled(pipeline_id: str) -> bool:
"""Check cancel flag for given pipeline id."""
pid = str(pipeline_id or "pipeline_editor")
with _lock:
return bool(_cancel_flags.get(pid, False))
return bool(_cancel_flags.get(pid, False))
def get_cancel_mode(pipeline_id: str) -> str:
"""Return current cancel mode for given pipeline id: 'graceful' or 'abort' (default graceful)."""
pid = str(pipeline_id or "pipeline_editor")
with _lock:
m = _cancel_modes.get(pid)
return m if m in {"graceful", "abort"} else "graceful"

View File

@@ -0,0 +1,415 @@
from __future__ import annotations
import json
import re
from typing import Any, Dict, Optional, Tuple
# Reuse executor's registry for original (untrimmed) requests
try:
from agentui.pipeline.executor import register_http_request as _reg_http_req # type: ignore
except Exception: # pragma: no cover
_reg_http_req = None # type: ignore
# -------- HTTP editable text parser (safe) --------
def parse_editable_http(s: str) -> Tuple[str, str, Dict[str, str], str]:
"""
Parse text pasted from Request area into (method, url, headers, body_text).
Stops header parsing when a line is not a valid HTTP header key (prevents treating JSON like '"contents": ...' as header).
"""
method, url = "POST", ""
headers: Dict[str, str] = {}
body = ""
try:
if not isinstance(s, str) or not s.strip():
return method, url, headers, body
txt = s.replace("\r\n", "\n")
lines = txt.split("\n")
if not lines:
return method, url, headers, body
first = (lines[0] or "").strip()
m = re.match(r"^([A-Z]+)\s+(\S+)(?:\s+HTTP/\d+(?:\.\d+)?)?$", first)
i = 1
if m:
method = (m.group(1) or "POST").strip().upper()
url = (m.group(2) or "").strip()
else:
i = 0 # no start-line -> treat as headers/body only
def _is_header_line(ln: str) -> bool:
if ":" not in ln:
return False
name = ln.split(":", 1)[0].strip()
# HTTP token: only letters/digits/hyphen. Prevents JSON keys like "contents": from being treated as headers.
return bool(re.fullmatch(r"[A-Za-z0-9\-]+", name))
# Read headers until blank line OR until line not looking like header (start of body)
while i < len(lines):
ln = lines[i]
if ln.strip() == "":
i += 1
break
if not _is_header_line(ln):
break
k, v = ln.split(":", 1)
headers[str(k).strip()] = str(v).strip()
i += 1
# Remainder is body (JSON or text)
body = "\n".join(lines[i:]) if i < len(lines) else ""
except Exception:
pass
return method, url, headers, body
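# Illustrative sketch (not part of the diff): parsing text pasted from the
# Request area; the JSON line is recognized as body, not as a header.
def _demo_parse_editable_http() -> None:
    method, url, headers, body = parse_editable_http(
        "POST https://example/api HTTP/1.1\n"
        "Authorization: Bearer TEST\n"
        "\n"
        '{"contents": [{"parts": [{"text": "hi"}]}]}'
    )
    assert method == "POST" and url == "https://example/api"
    assert headers == {"Authorization": "Bearer TEST"}
    assert body.startswith('{"contents"')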
# -------- Headers helpers --------
def dedupe_headers(h: Dict[str, Any]) -> Dict[str, Any]:
"""
Case-insensitive dedupe; drop Host/Content-Length (httpx will set proper).
Last value wins.
"""
try:
dedup: Dict[str, Tuple[str, Any]] = {}
for k, v in (h or {}).items():
lk = str(k).strip().lower()
if lk in {"host", "content-length"}:
continue
dedup[lk] = (k, v)
return {orig_k: val for (_, (orig_k, val)) in dedup.items()}
except Exception:
return dict(h or {})
def content_type_is_json(h: Dict[str, Any]) -> bool:
try:
return any(str(k).lower() == "content-type" and "json" in str(v).lower() for k, v in (h or {}).items())
except Exception:
return False
# -------- JSON parsing & normalization helpers --------
def try_parse_json(s: Any) -> Optional[Any]:
try:
if isinstance(s, (dict, list)):
return s
if isinstance(s, str) and s.strip():
return json.loads(s)
except Exception:
return None
return None
def normalize_jsonish_text(s: Any) -> str:
"""
Normalize JSON-looking text safely:
- If whole text is a quoted JSON string, decode via json.loads to inner string.
- Replace visible \\n/\\r/\\t outside JSON string literals with real control chars.
- Escape raw CR/LF/TAB inside JSON string literals as \\n/\\r/\\t to keep JSON valid.
"""
try:
txt = str(s if s is not None else "")
except Exception:
return ""
# If whole text looks like a quoted JSON string: decode to inner string
try:
if len(txt) >= 2 and txt[0] == '"' and txt[-1] == '"':
v = json.loads(txt)
if isinstance(v, str):
txt = v
except Exception:
pass
out_chars = []
i = 0
n = len(txt)
in_str = False
esc = False
while i < n:
ch = txt[i]
if in_str:
# escape raw control chars within JSON string literal
if ch == "\r":
# CRLF -> \n
if (i + 1) < n and txt[i + 1] == "\n":
out_chars.append("\\n")
i += 2
esc = False
continue
out_chars.append("\\r")
i += 1
esc = False
continue
if ch == "\n":
out_chars.append("\\n")
i += 1
esc = False
continue
if ch == "\t":
out_chars.append("\\t")
i += 1
esc = False
continue
out_chars.append(ch)
if esc:
esc = False
else:
if ch == "\\":
esc = True
elif ch == '"':
in_str = False
i += 1
continue
# not in string literal
if ch == '"':
in_str = True
out_chars.append(ch)
i += 1
continue
if ch == "\\" and (i + 1) < n:
nx = txt[i + 1]
if nx == "n":
out_chars.append("\n")
i += 2
continue
if nx == "r":
out_chars.append("\r")
i += 2
continue
if nx == "t":
out_chars.append("\t")
i += 2
continue
out_chars.append(ch)
i += 1
return "".join(out_chars)
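# Illustrative sketch (not part of the diff): the normalizer keeps JSON valid by
# re-escaping raw newlines inside string literals and decoding a fully quoted
# JSON string down to its inner text.
def _demo_normalize_jsonish_text() -> None:
    assert json.loads(normalize_jsonish_text('{"a": "x\ny"}')) == {"a": "x\ny"}
    assert normalize_jsonish_text('"{\\"a\\": 1}"') == '{"a": 1}'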
def extract_json_trailing(s: str) -> Optional[Any]:
"""
Pull trailing JSON object/array from mixed text:
- Try whole text first
- Then scan from last '{' or '[' backward.
"""
try:
if not isinstance(s, str):
return None
txt = s.strip()
try:
return json.loads(txt)
except Exception:
pass
idx = txt.rfind("{")
while idx >= 0:
seg = txt[idx:]
try:
return json.loads(seg)
except Exception:
idx = txt.rfind("{", 0, idx)
idx = txt.rfind("[")
while idx >= 0:
seg = txt[idx:]
try:
return json.loads(seg)
except Exception:
idx = txt.rfind("[", 0, idx)
return None
except Exception:
return None
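# Illustrative sketch (not part of the diff): recovering a trailing JSON object
# from mixed human-preview text.
def _demo_extract_json_trailing() -> None:
    assert extract_json_trailing('{"a": 1}') == {"a": 1}
    assert extract_json_trailing('HTTP 200 preview...\n{"a": {"b": 2}}') == {"a": {"b": 2}}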
def global_unescape_jsonish(s: str) -> str:
"""
Last-resort: unicode_escape decode to convert \\n -> \n, \\" -> ", \\\\ -> \, \\uXXXX -> char, etc.
"""
try:
import codecs as _codecs
return _codecs.decode(s, "unicode_escape")
except Exception:
try:
return (
s.replace("\\n", "\n")
.replace("\\r", "\r")
.replace("\\t", "\t")
.replace('\\"', '"')
.replace("\\\\", "\\")
)
except Exception:
return s
def looks_jsonish(txt: Any) -> bool:
try:
s = str(txt or "")
if "{" in s or "[" in s:
return True
# also patterns like key:
return bool(re.search(r'\s["\']?[A-Za-z0-9_\-]+["\']?\s*:', s))
except Exception:
return False
def deep_merge_dicts(a: Any, b: Any) -> Any:
"""
Merge dicts (b over a, recursively). Lists or non-dicts are replaced by b.
"""
if isinstance(a, dict) and isinstance(b, dict):
out = dict(a)
for k, v in b.items():
if (k in a) and isinstance(a.get(k), dict) and isinstance(v, dict):
out[k] = deep_merge_dicts(a.get(k), v)
else:
out[k] = v
return out
return b
# ---- Trim-aware merge that preserves original binary/base64 fields ----
def is_trimmed_b64_string(s: Any) -> bool:
try:
if not isinstance(s, str):
return False
return "(trimmed " in s
except Exception:
return False
def looks_base64ish(s: Any) -> bool:
try:
if not isinstance(s, str) or len(s) < 64:
return False
return bool(re.fullmatch(r"[A-Za-z0-9+/=\r\n]+", s))
except Exception:
return False
def merge_lists_preserving_b64(orig_list: Any, edited_list: Any) -> Any:
"""
Merge lists with base64-trimmed preservation but DO NOT pad from original:
- Result length equals edited_list length (indices beyond edited are dropped).
- At each index:
* If edited value is a trimmed placeholder string and original has a string → keep original.
* If both dicts → recurse via deep_merge_preserving_b64.
* If both lists → recurse via merge_lists_preserving_b64.
* Else → take edited value as-is.
"""
if not isinstance(edited_list, list):
return edited_list
if not isinstance(orig_list, list):
orig_list = []
out = []
for i, ev in enumerate(edited_list):
ov = orig_list[i] if i < len(orig_list) else None
if isinstance(ev, str) and is_trimmed_b64_string(ev) and isinstance(ov, str):
out.append(ov)
elif isinstance(ev, dict) and isinstance(ov, dict):
out.append(deep_merge_preserving_b64(ov, ev))
elif isinstance(ev, list) and isinstance(ov, list):
out.append(merge_lists_preserving_b64(ov, ev))
else:
out.append(ev)
return out
def deep_merge_preserving_b64(orig: Any, edited: Any) -> Any:
"""
Merge preserving original base64/data_url only for trimmed placeholders, with strict edited-shape:
- If edited is a trimmed placeholder string and orig is a string → keep orig.
- Dicts: RESULT CONTAINS ONLY KEYS FROM EDITED. Keys missing in edited are treated as deleted.
For each present key: recurse (dict/list) or take edited value; for trimmed strings keep orig.
- Lists: delegate to merge_lists_preserving_b64 (result length = edited length).
- Other types: replace with edited.
"""
if isinstance(edited, str) and is_trimmed_b64_string(edited) and isinstance(orig, str):
return orig
if isinstance(orig, dict) and isinstance(edited, dict):
out: Dict[str, Any] = {}
for k, ev in edited.items():
ov = orig.get(k)
if isinstance(ev, str) and is_trimmed_b64_string(ev) and isinstance(ov, str):
out[k] = ov
elif isinstance(ev, dict) and isinstance(ov, dict):
out[k] = deep_merge_preserving_b64(ov, ev)
elif isinstance(ev, list) and isinstance(ov, list):
out[k] = merge_lists_preserving_b64(ov, ev)
else:
out[k] = ev
return out
if isinstance(orig, list) and isinstance(edited, list):
return merge_lists_preserving_b64(orig, edited)
return edited
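# Illustrative sketch (not part of the diff): a trimmed placeholder is restored
# from the original, while keys absent from the edited dict are dropped.
def _demo_deep_merge_preserving_b64() -> None:
    orig = {"img": "iVBORw0KGgo-full-base64", "keep": 1, "drop": 2}
    edited = {"img": "iVBORw0K... (trimmed 12345 chars)", "keep": 99}
    assert deep_merge_preserving_b64(orig, edited) == {
        "img": "iVBORw0KGgo-full-base64",  # original untrimmed string preserved
        "keep": 99,                        # edited value wins
    }                                      # "drop" is treated as deleted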
def salvage_json_for_send(
edited_body_text: Any,
headers: Dict[str, Any],
orig_json: Optional[Any],
prefer_registry_original: bool = True,
) -> Tuple[Optional[Any], Optional[str]]:
"""
Build (final_json, final_text) for outgoing request body.
Strategy:
- Normalize text for JSON.
- Try parse; then try trailing extract; then unicode_escape unescape and retry.
- If prefer_registry_original=True and orig_json present:
* If edited_json present: deep-merge with base64 preservation, but ONLY keep keys present in edited;
lists are limited to the edited length (no padding from original).
* If not: DO NOT resurrect original. Empty/whitespace → send empty text; otherwise send raw text as-is.
- Else:
* If edited_json present => final_json = edited_json
* Else: if content-type is json and orig_json present => final_json = orig_json
else send raw text.
"""
# Normalize and attempt parse
norm = normalize_jsonish_text(edited_body_text)
edited_json = try_parse_json(norm)
if edited_json is None:
edited_json = extract_json_trailing(norm)
if edited_json is None:
ue = global_unescape_jsonish(str(edited_body_text or ""))
if isinstance(ue, str) and ue != edited_body_text:
ue_norm = normalize_jsonish_text(ue)
edited_json = try_parse_json(ue_norm) or extract_json_trailing(ue_norm)
json_ct = content_type_is_json(headers)
# Prefer original registry JSON where applicable
if prefer_registry_original and orig_json is not None:
if edited_json is None:
# Respect full manual control: do NOT resurrect original JSON.
# Empty/whitespace → send empty text; otherwise send raw text as-is.
if isinstance(norm, str) and not norm.strip():
return None, ""
else:
return None, str(edited_body_text or "")
else:
# Merge edits over original with trimmed-b64 preservation, but keep only keys present in edited
# and limit lists to the edited length.
return deep_merge_preserving_b64(orig_json, edited_json), None
# No prefer or no orig_json
if edited_json is not None:
return edited_json, None
if json_ct and orig_json is not None:
# Hard salvage for declared JSON payloads
maybe = try_parse_json(norm) or extract_json_trailing(norm)
return (maybe if maybe is not None else orig_json), None
# Plain text fallback
return None, str(edited_body_text or "")
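# Illustrative sketch (not part of the diff): valid edits are merged over the
# registry original, while unparseable edits are sent verbatim.
def _demo_salvage_json_for_send() -> None:
    orig = {"model": "gpt-x", "messages": [{"role": "user", "content": "hi"}]}
    headers = {"Content-Type": "application/json"}
    fj, ft = salvage_json_for_send('{"model": "gpt-y"}', headers, orig, True)
    assert fj == {"model": "gpt-y"} and ft is None   # edited keys win, rest dropped
    fj, ft = salvage_json_for_send("not json at all", headers, orig, True)
    assert fj is None and ft == "not json at all"    # original is not resurrected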
# -------- Registry wrapper --------
def register_manual_request(req_id: str, info: Dict[str, Any]) -> None:
try:
if _reg_http_req:
_reg_http_req(req_id, info)
except Exception:
pass

View File

@@ -86,6 +86,41 @@ def _read_kv_from_proxy_file() -> Dict[str, str]:
return out
return out
def _read_second_bare_flag_from_proxy() -> Optional[bool]:
"""
Reads the "second bare line" after the URL in proxy.txt and interprets it as a verify flag:
true/1/yes/on -> True
false/0/no/off -> False
Returns None if the line is absent or unrecognized.
"""
try:
p = Path("proxy.txt")
if not p.exists():
return None
lines = [ln.strip() for ln in p.read_text(encoding="utf-8").splitlines()]
# find the first "URL" line (non-empty, no '=', not a comment)
idx_url = -1
for i, ln in enumerate(lines):
if not ln or ln.startswith("#") or "=" in ln:
continue
idx_url = i
break
if idx_url >= 0:
# look for the next bare line
for j in range(idx_url + 1, len(lines)):
ln = lines[j].strip()
if not ln or ln.startswith("#") or "=" in ln:
continue
low = ln.lower()
if low in ("1", "true", "yes", "on"):
return True
if low in ("0", "false", "no", "off"):
return False
# if it does not look like a flag, treat it as absent
break
except Exception:
return None
return None
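# Illustrative proxy.txt layout for the helper above (values are hypothetical):
#
#   # key=value lines and comments are skipped when locating the URL line
#   http://127.0.0.1:8080
#   false
#
# Here the second bare line "false" yields verify=False; a bare line that is not
# a recognized flag makes the helper return None.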
def get_tls_verify() -> Union[bool, str]:
"""
Returns the value for the httpx.AsyncClient(verify=...) parameter:
@@ -119,31 +154,11 @@ def get_tls_verify() -> Union[bool, str]:
if path.exists():
return str(path)
# 2.1) Additionally: support a second key-less line containing true/false
try:
p = Path("proxy.txt")
if p.exists():
lines = [ln.strip() for ln in p.read_text(encoding="utf-8").splitlines()]
# find the first "URL" line (non-empty, no '=', not a comment)
idx_url = -1
for i, ln in enumerate(lines):
if not ln or ln.startswith("#") or "=" in ln:
continue
idx_url = i
break
if idx_url >= 0:
# look for the next bare line
for j in range(idx_url + 1, len(lines)):
ln = lines[j].strip()
if not ln or ln.startswith("#") or "=" in ln:
continue
low = ln.lower()
if low in ("1", "true", "yes", "on"):
return True
if low in ("0", "false", "no", "off"):
return False
# if it does not look like a flag, ignore it and continue
except Exception:
pass
second = _read_second_bare_flag_from_proxy()
if second is True:
return True
if second is False:
return False
# 3) Default file in the project root
default_ca = Path("proxy-ca.pem")
@@ -173,26 +188,9 @@ def is_verify_explicit() -> bool:
if "verify" in kv or "ca" in kv:
return True
# A second bare line counts as an explicit flag
try:
p = Path("proxy.txt")
if p.exists():
lines = [ln.strip() for ln in p.read_text(encoding="utf-8").splitlines()]
idx_url = -1
for i, ln in enumerate(lines):
if not ln or ln.startswith("#") or "=" in ln:
continue
idx_url = i
break
if idx_url >= 0:
for j in range(idx_url + 1, len(lines)):
ln = lines[j].strip()
if not ln or ln.startswith("#") or "=" in ln:
continue
if ln.lower() in ("1", "0", "true", "false", "yes", "no", "on", "off"):
return True
break
except Exception:
pass
second = _read_second_bare_flag_from_proxy()
if second is not None:
return True
if Path("proxy-ca.pem").exists():
return True

File diff suppressed because it is too large

View File

@@ -11,17 +11,93 @@ PRESETS_DIR = Path("presets")
VARS_DIR = Path(".agentui") / "vars"
# DRY meta/pipeline normalization: a single source of defaults and types
def normalize_pipeline(pipeline: Dict[str, Any]) -> Dict[str, Any]:
"""
Normalizes the pipeline's top-level keys into a consistent shape and fills in defaults.
Safe against missing keys and wrong types.
"""
if not isinstance(pipeline, dict):
pipeline = {}
out: Dict[str, Any] = dict(pipeline)
def _to_int(v, d):
try:
n = int(v)
return n if n > 0 else d
except Exception:
return d
def _to_float(v, d):
try:
n = float(v)
return n if n > 0 else d
except Exception:
return d
# Base fields
out["id"] = str(out.get("id") or "pipeline_editor")
out["name"] = str(out.get("name") or "Edited Pipeline")
out["parallel_limit"] = _to_int(out.get("parallel_limit"), 8)
out["loop_mode"] = str(out.get("loop_mode") or "dag")
out["loop_max_iters"] = _to_int(out.get("loop_max_iters"), 1000)
out["loop_time_budget_ms"] = _to_int(out.get("loop_time_budget_ms"), 10000)
out["clear_var_store"] = bool(out.get("clear_var_store", True))
out["http_timeout_sec"] = _to_float(out.get("http_timeout_sec"), 60)
# Global text-extraction options for [[OUTx]]
out["text_extract_strategy"] = str(out.get("text_extract_strategy") or "auto")
out["text_extract_json_path"] = str(out.get("text_extract_json_path") or "")
# Support alternative spellings of text_join_sep
join_sep = out.get("text_join_sep")
if join_sep is None:
for k in list(out.keys()):
if isinstance(k, str) and k.lower() == "text_join_sep":
join_sep = out.get(k)
break
out["text_join_sep"] = str(join_sep or "\n")
# Parsing presets
presets = out.get("text_extract_presets")
norm_presets: List[Dict[str, Any]] = []
if isinstance(presets, list):
for i, it in enumerate(presets):
if not isinstance(it, dict):
continue
norm_presets.append({
"id": str(it.get("id") or f"p{i}"),
"name": str(it.get("name") or it.get("json_path") or "Preset"),
"strategy": str(it.get("strategy") or "auto"),
"json_path": str(it.get("json_path") or ""),
"join_sep": str(it.get("join_sep") or "\n"),
})
out["text_extract_presets"] = norm_presets
# Nodes: must be a list
try:
nodes = out.get("nodes") or []
if not isinstance(nodes, list):
nodes = []
out["nodes"] = nodes
except Exception:
out["nodes"] = []
return out
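# Illustrative sketch (not part of the diff): defaults filled by the normalizer.
def _demo_normalize_pipeline() -> None:
    p = normalize_pipeline({"nodes": [], "parallel_limit": "not-a-number"})
    assert p["id"] == "pipeline_editor" and p["parallel_limit"] == 8
    assert p["http_timeout_sec"] == 60 and p["text_join_sep"] == "\n"
    assert p["text_extract_presets"] == []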
def load_pipeline() -> Dict[str, Any]:
if PIPELINE_FILE.exists():
try:
return json.loads(PIPELINE_FILE.read_text(encoding="utf-8"))
except Exception:
pass
return default_pipeline()
if PIPELINE_FILE.exists():
try:
data = json.loads(PIPELINE_FILE.read_text(encoding="utf-8"))
return normalize_pipeline(data)
except Exception:
pass
return normalize_pipeline(default_pipeline())
def save_pipeline(pipeline: Dict[str, Any]) -> None:
PIPELINE_FILE.write_text(json.dumps(pipeline, ensure_ascii=False, indent=2), encoding="utf-8")
norm = normalize_pipeline(pipeline or {})
PIPELINE_FILE.write_text(json.dumps(norm, ensure_ascii=False, indent=2), encoding="utf-8")
def list_presets() -> List[str]:

View File

@@ -35,6 +35,11 @@ _BARE_MACRO_RE = re.compile(r"\[\[\s*([A-Za-z_][A-Za-z0-9_]*(?:\.[^\]]+?)?)\s*\]
# Parse the expression up to the nearest '}}', allowing '}' inside (e.g. in JSON literals)
_BRACES_RE = re.compile(r"\{\{\s*(.*?)\s*\}\}", re.DOTALL)
# Shorthand syntax: img(mime?)[[...]] → data:<mime>;base64,<resolved_inner_macro>
# Example: img()[[OUT1]] → data:image/png;base64,{{resolved OUT1}}
# img(jpeg)[[OUT:n1.result...]] → data:image/jpeg;base64,{{resolved}}
_IMG_WRAPPER_RE = re.compile(r"(?is)img\(\s*([^)]+?)?\s*\)\s*\[\[\s*(.+?)\s*\]\]")
def _split_path(path: str) -> List[str]:
return [p.strip() for p in str(path).split(".") if str(p).strip()]
@@ -164,12 +169,21 @@ def _best_text_from_outputs(node_out: Any) -> str:
# Gemini
try:
if isinstance(base, dict):
cand0 = (base.get("candidates") or [{}])[0]
content = cand0.get("content") or {}
parts0 = (content.get("parts") or [{}])[0]
t = parts0.get("text")
if isinstance(t, str):
return t
cands = base.get("candidates") or []
texts: List[str] = []
for cand in cands:
try:
content = cand.get("content") or {}
parts = content.get("parts") or []
for p in parts:
if isinstance(p, dict):
t = p.get("text")
if isinstance(t, str) and t.strip():
texts.append(t.strip())
except Exception:
continue
if texts:
return "\n".join(texts)
except Exception:
pass
@@ -203,6 +217,47 @@ def render_template_simple(template: str, context: Dict[str, Any], out_map: Dict
return ""
s = str(template)
# 0) Shorthand syntax: img(mime?)[[...]] → data:<mime>;base64,<resolved>
# Runs before regular [[...]] macro expansion so that any square-bracket macro can be used inside.
def _normalize_mime(m: str) -> str:
mm = (m or "").strip().lower()
if not mm:
return "image/png"
if "/" in mm:
return mm
return {
"png": "image/png",
"jpg": "image/jpeg",
"jpeg": "image/jpeg",
"webp": "image/webp",
"gif": "image/gif",
"svg": "image/svg+xml",
"bmp": "image/bmp",
"tif": "image/tiff",
"tiff": "image/tiff",
}.get(mm, mm)
def _repl_imgwrap(m: re.Match) -> str:
mime_raw = m.group(1) or ""
inner = m.group(2) or ""
mime = _normalize_mime(mime_raw)
try:
val = _resolve_square_macro_value(inner, context, out_map)
except Exception:
val = ""
if isinstance(val, (dict, list, bool)) or val is None:
val = _stringify_for_template(val)
else:
val = str(val)
return f"data:{mime};base64,{val}"
# Support multiple occurrences: repeat until exhausted (in case of cascading macros)
while True:
ns, cnt = _IMG_WRAPPER_RE.subn(_repl_imgwrap, s)
s = ns
if cnt == 0:
break
# 1) [[VAR:...]] / [[OUT:...]] / [[STORE:...]] macros
def repl_var(m: re.Match) -> str:
path = m.group(1).strip()
@@ -539,8 +594,24 @@ def _tokenize_condition_expr(expr: str, context: Dict[str, Any], out_map: Dict[s
while j < n and (expr[j].isalnum() or expr[j] in "._"):
j += 1
word = expr[i:j]
# Word-based logical operators are not supported (use &&, ||, !)
tokens.append(word)
lw = word.lower()
# Literals: true/false/null (any casing) → Python constants
if re.fullmatch(r"[A-Za-z_][A-Za-z0-9_]*", word) and lw in {"true", "false", "null"}:
tokens.append("True" if lw == "true" else ("False" if lw == "false" else "None"))
i = j
continue
# Support bare identifiers from vars: cycleindex, WAS_ERROR, etc.
# If it is a plain identifier (no dots) that exists in context.vars, bind it to its value.
try:
vmap = context.get("vars") or {}
except Exception:
vmap = {}
if re.fullmatch(r"[A-Za-z_][A-Za-z0-9_]*", word) and isinstance(vmap, dict) and word in vmap:
name = add_binding(vmap.get(word))
tokens.append(name)
else:
# Word-based logical operators are not supported (use &&, ||, !)
tokens.append(word)
i = j
continue
@@ -687,17 +758,19 @@ def _safe_eval_bool(py_expr: str, bindings: Dict[str, Any]) -> bool:
if isinstance(node.op, ast.Not):
return (not val)
if isinstance(node, ast.BoolOp) and isinstance(node.op, tuple(allowed_boolops)):
vals = [bool(eval_node(v)) for v in node.values]
# Short-circuit evaluation:
# AND: stop at the first False and return False; otherwise True
# OR: stop at the first True and return True; otherwise False
if isinstance(node.op, ast.And):
res = True
for v in vals:
res = res and v
return res
for v in node.values:
if not bool(eval_node(v)):
return False
return True
if isinstance(node.op, ast.Or):
res = False
for v in vals:
res = res or v
return res
for v in node.values:
if bool(eval_node(v)):
return True
return False
if isinstance(node, ast.Compare):
left = eval_node(node.left)
for opnode, comparator in zip(node.ops, node.comparators):

View File

@@ -0,0 +1,34 @@
from __future__ import annotations
"""
Provider adapter package for ProviderCall.
Exports:
- ProviderAdapter base class
- Implementations: OpenAIAdapter, GeminiAdapter, GeminiImageAdapter, ClaudeAdapter
- Utilities: default_base_url_for, insert_items, split_pos_spec
"""
from .base import ( # [ProviderAdapter](agentui/providers/adapters/base.py:10)
ProviderAdapter,
default_base_url_for,
insert_items,
split_pos_spec,
)
from .openai import OpenAIAdapter # [OpenAIAdapter](agentui/providers/adapters/openai.py:39)
from .gemini import ( # [GeminiAdapter](agentui/providers/adapters/gemini.py:56)
GeminiAdapter,
GeminiImageAdapter, # [GeminiImageAdapter](agentui/providers/adapters/gemini.py:332)
)
from .claude import ClaudeAdapter # [ClaudeAdapter](agentui/providers/adapters/claude.py:56)
__all__ = [
"ProviderAdapter",
"OpenAIAdapter",
"GeminiAdapter",
"GeminiImageAdapter",
"ClaudeAdapter",
"default_base_url_for",
"insert_items",
"split_pos_spec",
]

View File

@@ -0,0 +1,148 @@
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional, Tuple
class ProviderAdapter(ABC): # [ProviderAdapter.__init__()](agentui/providers/adapters/base.py:10)
"""
Base provider-adapter interface for ProviderCall.
Adapter responsibilities:
- blocks_struct_for_template: build pm_struct from unified messages (Prompt Blocks)
- normalize_segment/filter_items: coerce an arbitrary segment into the target provider structure and drop empty items
- extract_system_text_from_obj: extract system text from an arbitrary segment (if present)
- combine_segments: merge pre_segments (prompt_preprocess) and prompt_combine with blocks_struct into the final pm_struct
- prompt_fragment: build the JSON fragment string substituted for [[PROMPT]]
- default_endpoint/default_base_url: default paths and base URLs
"""
name: str = "base"
# --- HTTP defaults ---
@abstractmethod
def default_base_url(self) -> str:
...
@abstractmethod
def default_endpoint(self, model: str) -> str:
...
# --- PROMPT: building provider structures ---
@abstractmethod
def blocks_struct_for_template(
self,
unified_messages: List[Dict[str, Any]],
context: Dict[str, Any],
node_config: Dict[str, Any],
) -> Dict[str, Any]:
"""
Build pm_struct for the target provider from unified messages [{role, content}]
(including text+image). The result must stay compatible with the current [[PROMPT]] logic.
"""
...
@abstractmethod
def normalize_segment(self, obj: Any) -> List[Dict[str, Any]]:
"""
Coerce an arbitrary segment (dict/list/str/numbers) into the target item array
(e.g. messages for openai/claude, or contents for gemini).
"""
...
@abstractmethod
def filter_items(self, items: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""
Filter out empty items (blank texts, etc.) according to the provider's rules.
"""
...
@abstractmethod
def extract_system_text_from_obj(self, obj: Any, render_ctx: Dict[str, Any]) -> Optional[str]:
"""
Extract system text from an arbitrary fragment object:
- OpenAI: messages[*] with role=system
- Gemini: systemInstruction.parts[].text
- Claude: top-level system (string/blocks)
Returns a string or None.
"""
...
@abstractmethod
def combine_segments(
self,
blocks_struct: Dict[str, Any],
pre_segments_raw: List[Dict[str, Any]],
raw_segs: List[str],
render_ctx: Dict[str, Any],
pre_var_paths: set[str],
render_template_simple_fn, # (s, ctx, out_map) -> str
var_macro_fullmatch_re, # _VAR_MACRO_RE.fullmatch
detect_vendor_fn, # detect_vendor
) -> Dict[str, Any]:
"""
Merge blocks_struct with the pre_segments_raw arrays and raw_segs strings (prompt_combine)
and return the final pm_struct. The behavior must match the current logic (positioning,
empty-item filtering, system-text gathering).
"""
...
@abstractmethod
def prompt_fragment(self, pm_struct: Dict[str, Any], node_config: Dict[str, Any]) -> str:
"""
Build the JSON fragment string for [[PROMPT]] from the final pm_struct.
"""
...
# --- Shared utilities for positioning and directive parsing ---------------------
def insert_items(base: List[Any], items: List[Any], pos_spec: Optional[str]) -> List[Any]: # [insert_items()](agentui/providers/adapters/base.py:114)
if not items:
return base
if not pos_spec or str(pos_spec).lower() == "append":
base.extend(items)
return base
p = str(pos_spec).lower()
if p == "prepend":
return list(items) + base
try:
idx = int(pos_spec) # type: ignore[arg-type]
if idx < 0:
idx = len(base) + idx
if idx < 0:
idx = 0
if idx > len(base):
idx = len(base)
return base[:idx] + list(items) + base[idx:]
except Exception:
base.extend(items)
return base
def split_pos_spec(s: str) -> Tuple[str, Optional[str]]: # [split_pos_spec()](agentui/providers/adapters/base.py:135)
"""
Split the trailing @pos=... directive off the segment body.
Returns (body, pos_spec | None).
"""
import re as _re
m = _re.search(r"@pos\s*=\s*(prepend|append|-?\d+)\s*$", str(s or ""), flags=_re.IGNORECASE)
if not m:
return (str(s or "").strip(), None)
body = str(s[: m.start()]).strip()
return (body, str(m.group(1)).strip().lower())
# --- Default base_url per "vendor" (used by RawForward) ------------------
def default_base_url_for(vendor: str) -> Optional[str]: # [default_base_url_for()](agentui/providers/adapters/base.py:149)
v = (vendor or "").strip().lower()
if v == "openai":
return "https://api.openai.com"
if v == "claude" or v == "anthropic":
return "https://api.anthropic.com"
if v == "gemini" or v == "gemini_image":
return "https://generativelanguage.googleapis.com"
return None
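# Illustrative sketch (not part of the diff): positioning directives and vendor defaults.
def _demo_base_utils() -> None:
    body, pos = split_pos_spec('{"messages": []} @pos=prepend')
    assert body == '{"messages": []}' and pos == "prepend"
    assert insert_items(["a", "c"], ["b"], "1") == ["a", "b", "c"]  # integer index
    assert insert_items(["a"], ["z"], "prepend") == ["z", "a"]
    assert default_base_url_for("anthropic") == "https://api.anthropic.com"
    assert default_base_url_for("unknown") is None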

View File

@@ -0,0 +1,475 @@
from __future__ import annotations
import json
from typing import Any, Dict, List, Optional
from agentui.providers.adapters.base import ( # [ProviderAdapter](agentui/providers/adapters/base.py:10)
ProviderAdapter,
insert_items,
split_pos_spec,
)
def _is_data_url(u: str) -> bool: # [_is_data_url()](agentui/providers/adapters/claude.py:14)
return isinstance(u, str) and u.strip().lower().startswith("data:")
def _split_data_url(u: str) -> tuple[str, str]: # [_split_data_url()](agentui/providers/adapters/claude.py:18)
"""
Returns (mime, b64) for a data URL.
Supported form: data:<mime>;base64,<b64>
"""
try:
header, b64 = u.split(",", 1)
mime = "application/octet-stream"
if header.startswith("data:"):
header2 = header[5:]
if ";base64" in header2:
mime = header2.split(";base64", 1)[0] or mime
elif ";" in header2:
mime = header2.split(";", 1)[0] or mime
elif header2:
mime = header2
return mime, b64
except Exception:
return "application/octet-stream", ""
def _try_json(s: str) -> Any: # [_try_json()](agentui/providers/adapters/claude.py:38)
try:
obj = json.loads(s)
except Exception:
try:
obj = json.loads(s, strict=False) # type: ignore[call-arg]
except Exception:
return None
for _ in range(2):
if isinstance(obj, str):
st = obj.strip()
if (st.startswith("{") and st.endswith("}")) or (st.startswith("[") and st.endswith("]")):
try:
obj = json.loads(st)
continue
except Exception:
break
break
return obj
class ClaudeAdapter(ProviderAdapter): # [ClaudeAdapter.__init__()](agentui/providers/adapters/claude.py:56)
name = "claude"
# --- HTTP defaults ---
def default_base_url(self) -> str:
return "https://api.anthropic.com"
def default_endpoint(self, model: str) -> str:
return "/v1/messages"
# --- PROMPT: building provider structures ---
def blocks_struct_for_template(
self,
unified_messages: List[Dict[str, Any]],
context: Dict[str, Any],
node_config: Dict[str, Any],
) -> Dict[str, Any]:
"""
Compatible with the provider=='claude' branch of
[ProviderCallNode._blocks_struct_for_template()](agentui/pipeline/executor.py:2022).
"""
# System messages collected as plain text
sys_msgs = []
for m in (unified_messages or []):
if m.get("role") == "system":
c = m.get("content")
if isinstance(c, list):
sys_msgs.append("\n".join([str(p.get("text") or "") for p in c if isinstance(p, dict) and p.get("type") == "text"]))
else:
sys_msgs.append(str(c or ""))
sys_text = "\n\n".join([s for s in sys_msgs if s]).strip()
out_msgs = []
for m in (unified_messages or []):
if m.get("role") == "system":
continue
role = m.get("role")
role = role if role in {"user", "assistant"} else "user"
c = m.get("content")
blocks: List[Dict[str, Any]] = []
if isinstance(c, list):
for p in c:
if not isinstance(p, dict):
continue
if p.get("type") == "text":
blocks.append({"type": "text", "text": str(p.get("text") or "")})
elif p.get("type") in {"image_url", "image"}:
url = str(p.get("url") or "")
if _is_data_url(url):
mime, b64 = _split_data_url(url)
blocks.append({"type": "image", "source": {"type": "base64", "media_type": mime, "data": b64}})
else:
blocks.append({"type": "image", "source": {"type": "url", "url": url}})
else:
blocks.append({"type": "text", "text": str(c or "")})
out_msgs.append({"role": role, "content": blocks})
claude_no_system = False
try:
claude_no_system = bool((node_config or {}).get("claude_no_system", False))
except Exception:
claude_no_system = False
if claude_no_system:
if sys_text:
out_msgs = [{"role": "user", "content": [{"type": "text", "text": sys_text}]}] + out_msgs
return {
"messages": out_msgs,
"system_text": sys_text,
}
d = {
"system_text": sys_text,
"messages": out_msgs,
}
if sys_text:
# Prefer system as a plain string (proxy compatibility)
d["system"] = sys_text
return d
def normalize_segment(self, x: Any) -> List[Dict[str, Any]]:
"""
Compatible with [_as_claude_messages()](agentui/pipeline/executor.py:2602).
"""
msgs: List[Dict[str, Any]] = []
try:
if isinstance(x, dict):
# Dict with messages (OpenAI-like)
if isinstance(x.get("messages"), list):
x = x.get("messages") or []
# fallthrough to list mapping below
elif isinstance(x.get("contents"), list):
# Gemini -> Claude
for c in (x.get("contents") or []):
if not isinstance(c, dict):
continue
role_raw = str(c.get("role") or "user")
role = "assistant" if role_raw == "model" else ("user" if role_raw not in {"user", "assistant"} else role_raw)
parts = c.get("parts") or []
text = "\n".join([str(p.get("text")) for p in parts if isinstance(p, dict) and isinstance(p.get("text"), str)]).strip()
msgs.append({"role": role, "content": [{"type": "text", "text": text}]})
return msgs
if isinstance(x, list):
# Gemini contents list -> Claude messages
if all(isinstance(c, dict) and "parts" in c for c in x):
for c in x:
role_raw = str(c.get("role") or "user")
role = "assistant" if role_raw == "model" else ("user" if role_raw not in {"user", "assistant"} else role_raw)
blocks: List[Dict[str, Any]] = []
for p in (c.get("parts") or []):
if isinstance(p, dict) and isinstance(p.get("text"), str):
txt = p.get("text").strip()
if txt:
blocks.append({"type": "text", "text": txt})
msgs.append({"role": role, "content": blocks or [{"type": "text", "text": ""}]})
return msgs
# OpenAI messages list -> Claude
if all(isinstance(m, dict) and "content" in m for m in x):
out: List[Dict[str, Any]] = []
for m in x:
role = m.get("role", "user")
cont = m.get("content")
blocks: List[Dict[str, Any]] = []
if isinstance(cont, str):
blocks.append({"type": "text", "text": cont})
elif isinstance(cont, list):
for p in cont:
if not isinstance(p, dict):
continue
if p.get("type") == "text":
blocks.append({"type": "text", "text": str(p.get("text") or "")})
elif p.get("type") in {"image_url", "image"}:
url = ""
if isinstance(p.get("image_url"), dict):
url = str((p.get("image_url") or {}).get("url") or "")
elif "url" in p:
url = str(p.get("url") or "")
if url:
blocks.append({"type": "image", "source": {"type": "url", "url": url}})
else:
blocks.append({"type": "text", "text": json.dumps(cont, ensure_ascii=False)})
out.append({"role": role if role in {"user", "assistant"} else "user", "content": blocks})
return out
# Fallback
return [{"role": "user", "content": [{"type": "text", "text": json.dumps(x, ensure_ascii=False)}]}]
if isinstance(x, str):
try_obj = _try_json(x)
if try_obj is not None:
return self.normalize_segment(try_obj)
return [{"role": "user", "content": [{"type": "text", "text": x}]}]
return [{"role": "user", "content": [{"type": "text", "text": json.dumps(x, ensure_ascii=False)}]}]
except Exception:
return [{"role": "user", "content": [{"type": "text", "text": str(x)}]}]
def filter_items(self, arr: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""
Compatible with [_filter_claude()](agentui/pipeline/executor.py:2820).
"""
out: List[Dict[str, Any]] = []
for m in (arr or []):
if not isinstance(m, dict):
continue
blocks = m.get("content")
if isinstance(blocks, list):
norm = []
for b in blocks:
if isinstance(b, dict) and b.get("type") == "text":
txt = str(b.get("text") or "")
if txt.strip():
norm.append({"type": "text", "text": txt})
if norm:
out.append({"role": m.get("role", "user"), "content": norm})
return out
def extract_system_text_from_obj(self, x: Any, render_ctx: Dict[str, Any]) -> Optional[str]:
"""
Behavior compatible with [_extract_sys_text_from_obj()](agentui/pipeline/executor.py:2676).
"""
try:
# Dict objects
if isinstance(x, dict):
# Gemini systemInstruction
if "systemInstruction" in x:
si = x.get("systemInstruction")
def _parts_to_text(siobj: Any) -> str:
try:
parts = siobj.get("parts") or []
texts = [
str(p.get("text") or "")
for p in parts
if isinstance(p, dict) and isinstance(p.get("text"), str) and p.get("text").strip()
]
return "\n".join([t for t in texts if t]).strip()
except Exception:
return ""
if isinstance(si, dict):
t = _parts_to_text(si)
if t:
return t
if isinstance(si, list):
texts = []
for p in si:
if isinstance(p, dict) and isinstance(p.get("text"), str) and p.get("text").strip():
texts.append(p.get("text").strip())
t = "\n".join(texts).strip()
if t:
return t
if isinstance(si, str) and si.strip():
return si.strip()
# Claude system (string or blocks)
if "system" in x and not ("messages" in x and isinstance(x.get("messages"), list)):
sysv = x.get("system")
if isinstance(sysv, str) and sysv.strip():
return sysv.strip()
if isinstance(sysv, list):
texts = [
str(b.get("text") or "")
for b in sysv
if isinstance(b, dict)
and (b.get("type") == "text")
and isinstance(b.get("text"), str)
and b.get("text").strip()
]
t = "\n".join([t for t in texts if t]).strip()
if t:
return t
# OpenAI messages with role=system
if isinstance(x.get("messages"), list):
sys_msgs = []
for m in (x.get("messages") or []):
try:
if (str(m.get("role") or "").lower().strip() == "system"):
cont = m.get("content")
if isinstance(cont, str) and cont.strip():
sys_msgs.append(cont.strip())
elif isinstance(cont, list):
for p in cont:
if isinstance(p, dict) and p.get("type") == "text" and isinstance(p.get("text"), str) and p.get("text").strip():
sys_msgs.append(p.get("text").strip())
except Exception:
continue
if sys_msgs:
return "\n\n".join(sys_msgs).strip()
# List objects
if isinstance(x, list):
# OpenAI messages list with role=system
if all(isinstance(m, dict) and "role" in m for m in x):
sys_msgs = []
for m in x:
try:
if (str(m.get("role") or "").lower().strip() == "system"):
cont = m.get("content")
if isinstance(cont, str) and cont.strip():
sys_msgs.append(cont.strip())
elif isinstance(cont, list):
for p in cont:
if isinstance(p, dict) and p.get("type") == "text" and isinstance(p.get("text"), str) and p.get("text").strip():
sys_msgs.append(p.get("text").strip())
except Exception:
continue
if sys_msgs:
return "\n\n".join(sys_msgs).strip()
# Gemini 'contents' list: try reading systemInstruction from the incoming snapshot
if all(isinstance(c, dict) and "parts" in c for c in x):
try:
inc = (render_ctx.get("incoming") or {}).get("json") or {}
si = inc.get("systemInstruction")
if si is not None:
return self.extract_system_text_from_obj({"systemInstruction": si}, render_ctx)
except Exception:
pass
return None
except Exception:
return None
def combine_segments(
self,
blocks_struct: Dict[str, Any],
pre_segments_raw: List[Dict[str, Any]],
raw_segs: List[str],
render_ctx: Dict[str, Any],
pre_var_paths: set[str],
render_template_simple_fn,
var_macro_fullmatch_re,
detect_vendor_fn,
) -> Dict[str, Any]:
"""
Replays the provider=='claude' branch of prompt_combine
([ProviderCallNode.run()](agentui/pipeline/executor.py:2998)).
"""
built3: List[Dict[str, Any]] = []
sys_texts: List[str] = []
# Node config (for claude_no_system) is passed via render_ctx['_node_config']; see the integration
node_cfg = {}
try:
nc = render_ctx.get("_node_config")
if isinstance(nc, dict):
node_cfg = nc
except Exception:
node_cfg = {}
claude_no_system = False
try:
claude_no_system = bool(node_cfg.get("claude_no_system", False))
except Exception:
claude_no_system = False
# Pre-segments
for _pre in (pre_segments_raw or []):
try:
_obj = _pre.get("obj")
items = self.normalize_segment(_obj)
items = self.filter_items(items)
built3 = insert_items(built3, items, _pre.get("pos"))
try:
sx = self.extract_system_text_from_obj(_obj, render_ctx)
if isinstance(sx, str) and sx.strip():
sys_texts.append(sx.strip())
except Exception:
pass
except Exception:
pass
# Main segments
for raw_seg in (raw_segs or []):
body_seg, pos_spec = split_pos_spec(raw_seg)
if body_seg == "[[PROMPT]]":
items = self.filter_items(list(blocks_struct.get("messages", []) or []))
built3 = insert_items(built3, items, pos_spec)
continue
m_pre = var_macro_fullmatch_re.fullmatch(body_seg)
if m_pre:
_p = (m_pre.group(1) or "").strip()
try:
if _p in pre_var_paths:
# Skip duplicate var segment - already inserted via prompt_preprocess (filtered)
continue
except Exception:
pass
resolved = render_template_simple_fn(body_seg, render_ctx, render_ctx.get("OUT") or {})
obj = _try_json(resolved)
try:
pg = detect_vendor_fn(obj if isinstance(obj, dict) else {})
print(f"DEBUG: prompt_combine seg provider_guess={pg} -> target=claude pos={pos_spec}")
except Exception:
pass
items = self.normalize_segment(obj if obj is not None else resolved)
items = self.filter_items(items)
built3 = insert_items(built3, items, pos_spec)
try:
sx = self.extract_system_text_from_obj(obj, render_ctx) if obj is not None else None
if isinstance(sx, str) and sx.strip():
sys_texts.append(sx.strip())
except Exception:
pass
if not built3:
built3 = self.filter_items(list(blocks_struct.get("messages", []) or []))
# Merge system blocks from PROMPT blocks + gathered sys_texts
existing_sys = blocks_struct.get("system") or []
sys_blocks: List[Dict[str, Any]] = []
if isinstance(existing_sys, list):
sys_blocks.extend(existing_sys)
st0 = blocks_struct.get("system_text") or ""
# Ensure PROMPT system_text from blocks is included as a Claude system block
if isinstance(st0, str) and st0.strip():
sys_blocks.append({"type": "text", "text": st0})
for s in sys_texts:
sys_blocks.append({"type": "text", "text": s})
st = "\n\n".join([t for t in [st0] + sys_texts if isinstance(t, str) and t.strip()])
if claude_no_system:
# Prepend system text as a user message instead of top-level system
if st:
built3 = [{"role": "user", "content": [{"type": "text", "text": st}]}] + built3
return {"messages": built3, "system_text": st}
pm_struct = {"messages": built3, "system_text": st}
# Prefer array of system blocks when possible; fallback to single text block
if sys_blocks:
pm_struct["system"] = sys_blocks
elif st:
pm_struct["system"] = [{"type": "text", "text": st}]
return pm_struct
def prompt_fragment(self, pm_struct: Dict[str, Any], node_config: Dict[str, Any]) -> str:
"""
Compatible with the provider=='claude' branch of [[PROMPT]] construction
([ProviderCallNode.run()](agentui/pipeline/executor.py:3125)).
"""
parts: List[str] = []
# Honor the compatibility flag: with claude_no_system, do not add a top-level "system"
claude_no_system = False
try:
claude_no_system = bool((node_config or {}).get("claude_no_system", False))
except Exception:
claude_no_system = False
if not claude_no_system:
# Prefer the array of system blocks if present; otherwise the system_text string
sys_val = pm_struct.get("system", None)
if sys_val is None:
sys_val = pm_struct.get("system_text")
if sys_val:
parts.append('"system": ' + json.dumps(sys_val, ensure_ascii=False))
msgs = pm_struct.get("messages")
if msgs is not None:
parts.append('"messages": ' + json.dumps(msgs, ensure_ascii=False))
return ", ".join(parts)

View File

@@ -0,0 +1,419 @@
from __future__ import annotations
import json
from typing import Any, Dict, List, Optional, Tuple
from agentui.providers.adapters.base import ( # [ProviderAdapter](agentui/providers/adapters/base.py:10)
ProviderAdapter,
insert_items,
split_pos_spec,
)
def _is_data_url(u: str) -> bool: # [_is_data_url()](agentui/providers/adapters/gemini.py:14)
return isinstance(u, str) and u.strip().lower().startswith("data:")
def _split_data_url(u: str) -> tuple[str, str]: # [_split_data_url()](agentui/providers/adapters/gemini.py:18)
"""
Returns (mime, b64) for a data URL.
Supported form: data:<mime>;base64,<b64>
"""
try:
header, b64 = u.split(",", 1)
mime = "application/octet-stream"
if header.startswith("data:"):
header2 = header[5:]
if ";base64" in header2:
mime = header2.split(";base64", 1)[0] or mime
elif ";" in header2:
mime = header2.split(";", 1)[0] or mime
elif header2:
mime = header2
return mime, b64
except Exception:
return "application/octet-stream", ""
def _try_json(s: str) -> Any: # [_try_json()](agentui/providers/adapters/gemini.py:38)
try:
obj = json.loads(s)
except Exception:
try:
obj = json.loads(s, strict=False) # type: ignore[call-arg]
except Exception:
return None
for _ in range(2):
if isinstance(obj, str):
st = obj.strip()
if (st.startswith("{") and st.endswith("}")) or (st.startswith("[") and st.endswith("]")):
try:
obj = json.loads(st)
continue
except Exception:
break
break
return obj
class GeminiAdapter(ProviderAdapter): # [GeminiAdapter.__init__()](agentui/providers/adapters/gemini.py:56)
name = "gemini"
# --- HTTP defaults ---
def default_base_url(self) -> str:
return "https://generativelanguage.googleapis.com"
def default_endpoint(self, model: str) -> str:
# endpoint with a {{ model }} template (as in the original code)
return "/v1beta/models/{{ model }}:generateContent"
# --- PROMPT: building provider structures ---
def blocks_struct_for_template(
self,
unified_messages: List[Dict[str, Any]],
context: Dict[str, Any],
node_config: Dict[str, Any],
) -> Dict[str, Any]:
"""
Compatible with the provider in {'gemini','gemini_image'} branch of
[ProviderCallNode._blocks_struct_for_template()](agentui/pipeline/executor.py:1981).
"""
def _text_from_msg(m: Dict[str, Any]) -> str:
c = m.get("content")
if isinstance(c, list):
texts = [str(p.get("text") or "") for p in c if isinstance(p, dict) and p.get("type") == "text"]
return "\n".join([t for t in texts if t])
return str(c or "")
sys_text = "\n\n".join([_text_from_msg(m) for m in (unified_messages or []) if m.get("role") == "system"]).strip()
contents: List[Dict[str, Any]] = []
for m in (unified_messages or []):
if m.get("role") == "system":
continue
role = "model" if m.get("role") == "assistant" else "user"
c = m.get("content")
parts: List[Dict[str, Any]] = []
if isinstance(c, list):
for p in c:
if not isinstance(p, dict):
continue
if p.get("type") == "text":
parts.append({"text": str(p.get("text") or "")})
elif p.get("type") in {"image_url", "image"}:
url = str(p.get("url") or "")
if _is_data_url(url):
mime, b64 = _split_data_url(url)
parts.append({"inline_data": {"mime_type": mime, "data": b64}})
else:
parts.append({"text": url})
else:
parts.append({"text": str(c or "")})
contents.append({"role": role, "parts": parts})
d: Dict[str, Any] = {
"contents": contents,
"system_text": sys_text,
}
if sys_text:
d["systemInstruction"] = {"parts": [{"text": sys_text}]}
return d
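# Editor's sketch (not in the original commit): for the unified input
#   [{"role": "system", "content": "Be brief"},
#    {"role": "user", "content": [{"type": "text", "text": "Hi"}]}]
# this method returns
#   {"contents": [{"role": "user", "parts": [{"text": "Hi"}]}],
#    "system_text": "Be brief",
#    "systemInstruction": {"parts": [{"text": "Be brief"}]}}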
def normalize_segment(self, x: Any) -> List[Dict[str, Any]]:
"""
Compatible with [_as_gemini_contents()](agentui/pipeline/executor.py:2521).
"""
cnts: List[Dict[str, Any]] = []
try:
if isinstance(x, dict):
if isinstance(x.get("contents"), list):
return list(x.get("contents") or [])
if isinstance(x.get("messages"), list):
# OpenAI → Gemini
for m in (x.get("messages") or []):
if not isinstance(m, dict):
continue
role_raw = str(m.get("role") or "user")
role = "model" if role_raw == "assistant" else "user"
cont = m.get("content")
parts: List[Dict[str, Any]] = []
if isinstance(cont, str):
parts = [{"text": cont}]
elif isinstance(cont, list):
for p in cont:
if not isinstance(p, dict):
continue
if p.get("type") == "text":
parts.append({"text": str(p.get("text") or "")})
elif p.get("type") in {"image_url", "image"}:
# Gemini does not accept external image URLs as images; keep them as a text link
url = ""
if isinstance(p.get("image_url"), dict):
url = str((p.get("image_url") or {}).get("url") or "")
elif "url" in p:
url = str(p.get("url") or "")
if url:
parts.append({"text": url})
else:
parts = [{"text": json.dumps(cont, ensure_ascii=False)}]
cnts.append({"role": role, "parts": parts})
return cnts
if isinstance(x, list):
# Gemini contents list already
if all(isinstance(c, dict) and "parts" in c for c in x):
return list(x)
# OpenAI messages list -> Gemini
if all(isinstance(m, dict) and "content" in m for m in x):
out: List[Dict[str, Any]] = []
for m in x:
role_raw = str(m.get("role") or "user")
role = "model" if role_raw == "assistant" else "user"
cont = m.get("content")
parts: List[Dict[str, Any]] = []
if isinstance(cont, str):
parts = [{"text": cont}]
elif isinstance(cont, list):
for p in cont:
if not isinstance(p, dict):
continue
if p.get("type") == "text":
parts.append({"text": str(p.get("text") or "")})
elif p.get("type") in {"image_url", "image"}:
url = ""
if isinstance(p.get("image_url"), dict):
url = str((p.get("image_url") or {}).get("url") or "")
elif "url" in p:
url = str(p.get("url") or "")
if url:
parts.append({"text": url})
else:
parts = [{"text": json.dumps(cont, ensure_ascii=False)}]
out.append({"role": role, "parts": parts})
return out
# Fallback
return [{"role": "user", "parts": [{"text": json.dumps(x, ensure_ascii=False)}]}]
if isinstance(x, str):
try_obj = _try_json(x)
if try_obj is not None:
return self.normalize_segment(try_obj)
return [{"role": "user", "parts": [{"text": x}]}]
return [{"role": "user", "parts": [{"text": json.dumps(x, ensure_ascii=False)}]}]
except Exception:
return [{"role": "user", "parts": [{"text": str(x)}]}]
def filter_items(self, arr: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""
Compatible with [_filter_gemini()](agentui/pipeline/executor.py:2782).
Keep inline_data/inlineData as-is; text parts only when non-empty.
"""
out: List[Dict[str, Any]] = []
for it in (arr or []):
if not isinstance(it, dict):
continue
parts = it.get("parts") or []
norm_parts = []
for p in parts:
if isinstance(p, dict):
t = p.get("text")
if isinstance(t, str) and t.strip():
norm_parts.append({"text": t})
elif "inline_data" in p or "inlineData" in p:
norm_parts.append(p)  # pass images through as-is
if norm_parts:
out.append({"role": it.get("role", "user"), "parts": norm_parts})
return out
def extract_system_text_from_obj(self, x: Any, render_ctx: Dict[str, Any]) -> Optional[str]:
"""
Compatible with [_extract_sys_text_from_obj()](agentui/pipeline/executor.py:2676) for Gemini.
"""
try:
# Dict
if isinstance(x, dict):
if "systemInstruction" in x:
si = x.get("systemInstruction")
def _parts_to_text(siobj: Any) -> str:
try:
parts = siobj.get("parts") or []
texts = [
str(p.get("text") or "")
for p in parts
if isinstance(p, dict) and isinstance(p.get("text"), str) and p.get("text").strip()
]
return "\n".join([t for t in texts if t]).strip()
except Exception:
return ""
if isinstance(si, dict):
t = _parts_to_text(si)
if t:
return t
if isinstance(si, list):
texts = []
for p in si:
if isinstance(p, dict) and isinstance(p.get("text"), str) and p.get("text").strip():
texts.append(p.get("text").strip())
t = "\n".join(texts).strip()
if t:
return t
if isinstance(si, str) and si.strip():
return si.strip()
# OpenAI system inside messages
if isinstance(x.get("messages"), list):
sys_msgs = []
for m in (x.get("messages") or []):
try:
if (str(m.get("role") or "").lower().strip() == "system"):
cont = m.get("content")
if isinstance(cont, str) and cont.strip():
sys_msgs.append(cont.strip())
elif isinstance(cont, list):
for p in cont:
if (
isinstance(p, dict)
and p.get("type") == "text"
and isinstance(p.get("text"), str)
and p.get("text").strip()
):
sys_msgs.append(p.get("text").strip())
except Exception:
continue
if sys_msgs:
return "\n\n".join(sys_msgs).strip()
# List
if isinstance(x, list):
if all(isinstance(m, dict) and "role" in m for m in x):
sys_msgs = []
for m in x:
try:
if (str(m.get("role") or "").lower().strip() == "system"):
cont = m.get("content")
if isinstance(cont, str) and cont.strip():
sys_msgs.append(cont.strip())
elif isinstance(cont, list):
for p in cont:
if (
isinstance(p, dict)
and p.get("type") == "text"
and isinstance(p.get("text"), str)
and p.get("text").strip()
):
sys_msgs.append(p.get("text").strip())
except Exception:
continue
if sys_msgs:
return "\n\n".join(sys_msgs).strip()
# Gemini contents list -> try to take it from the incoming snapshot
if all(isinstance(c, dict) and "parts" in c for c in x):
try:
inc = (render_ctx.get("incoming") or {}).get("json") or {}
si = inc.get("systemInstruction")
if si is not None:
return self.extract_system_text_from_obj({"systemInstruction": si}, render_ctx)
except Exception:
pass
return None
except Exception:
return None
def combine_segments(
self,
blocks_struct: Dict[str, Any],
pre_segments_raw: List[Dict[str, Any]],
raw_segs: List[str],
render_ctx: Dict[str, Any],
pre_var_paths: set[str],
render_template_simple_fn,
var_macro_fullmatch_re,
detect_vendor_fn,
) -> Dict[str, Any]:
"""
Mirrors the provider in {'gemini','gemini_image'} branch of prompt_combine
([ProviderCallNode.run()](agentui/pipeline/executor.py:2874)).
"""
built: List[Dict[str, Any]] = []
sys_texts: List[str] = []
# 1) Pre-segments
for _pre in (pre_segments_raw or []):
try:
_obj = _pre.get("obj")
items = self.normalize_segment(_obj)
items = self.filter_items(items)
built = insert_items(built, items, _pre.get("pos"))
try:
sx = self.extract_system_text_from_obj(_obj, render_ctx)
if isinstance(sx, str) and sx.strip():
sys_texts.append(sx.strip())
except Exception:
pass
except Exception:
pass
# 2) Main segments
for raw_seg in (raw_segs or []):
body_seg, pos_spec = split_pos_spec(raw_seg)
if body_seg == "[[PROMPT]]":
items = self.filter_items(list(blocks_struct.get("contents", []) or []))
built = insert_items(built, items, pos_spec)
continue
m_pre = var_macro_fullmatch_re.fullmatch(body_seg)
if m_pre:
_p = (m_pre.group(1) or "").strip()
try:
if _p in pre_var_paths:
continue
except Exception:
pass
resolved = render_template_simple_fn(body_seg, render_ctx, render_ctx.get("OUT") or {})
obj = _try_json(resolved)
# debug provider guess
try:
pg = detect_vendor_fn(obj if isinstance(obj, dict) else {})
print(f"DEBUG: prompt_combine seg provider_guess={pg} -> target=gemini pos={pos_spec}")
except Exception:
pass
items = self.normalize_segment(obj if obj is not None else resolved)
items = self.filter_items(items)
built = insert_items(built, items, pos_spec)
try:
sx = self.extract_system_text_from_obj(obj, render_ctx) if obj is not None else None
if isinstance(sx, str) and sx.strip():
sys_texts.append(sx.strip())
except Exception:
pass
if not built:
built = self.filter_items(list(blocks_struct.get("contents", []) or []))
# Merge systemInstruction: PROMPT blocks + gathered sys_texts
existing_si = blocks_struct.get("systemInstruction")
parts = []
if isinstance(existing_si, dict) and isinstance(existing_si.get("parts"), list):
parts = list(existing_si.get("parts") or [])
for s in sys_texts:
parts.append({"text": s})
new_si = {"parts": parts} if parts else existing_si
return {"contents": built, "systemInstruction": new_si, "system_text": blocks_struct.get("system_text")}
def prompt_fragment(self, pm_struct: Dict[str, Any], node_config: Dict[str, Any]) -> str:
"""
Compatible with the provider in {'gemini','gemini_image'} branch of [[PROMPT]] construction
([ProviderCallNode.run()](agentui/pipeline/executor.py:3103)).
"""
parts = []
contents = pm_struct.get("contents")
if contents is not None:
parts.append('"contents": ' + json.dumps(contents, ensure_ascii=False))
sysi = pm_struct.get("systemInstruction")
if sysi is not None:
parts.append('"systemInstruction": ' + json.dumps(sysi, ensure_ascii=False))
return ", ".join(parts)
class GeminiImageAdapter(GeminiAdapter): # [GeminiImageAdapter.__init__()](agentui/providers/adapters/gemini.py:332)
name = "gemini_image"
# All logic is the same as Gemini (generateContent), including the defaults.
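
Editor's note: a quick end-to-end sketch, not part of the commit. It assumes the adapters instantiate with no arguments, as the registry below does:

from agentui.providers.adapters.gemini import GeminiAdapter

a = GeminiAdapter()
seg = a.normalize_segment({"messages": [{"role": "user", "content": "  "},
                                        {"role": "user", "content": "Hello"}]})
print(a.filter_items(seg))  # [{'role': 'user', 'parts': [{'text': 'Hello'}]}]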

View File

@@ -0,0 +1,398 @@
from __future__ import annotations
import json
import re
from typing import Any, Dict, List, Optional, Tuple
from agentui.providers.adapters.base import ( # [ProviderAdapter](agentui/providers/adapters/base.py:10)
ProviderAdapter,
insert_items,
split_pos_spec,
)
def _try_json(s: str) -> Any: # [_try_json()](agentui/providers/adapters/openai.py:16)
"""
Parses JSON from a string. Permissive mode with up to two rounds of string unwrapping, as in the old logic.
Returns a dict/list/primitive, or None on failure.
"""
try:
obj = json.loads(s)
except Exception:
try:
obj = json.loads(s, strict=False) # type: ignore[call-arg]
except Exception:
return None
# If the result is a string that itself looks like JSON, try to unwrap it up to 2 times
for _ in range(2):
if isinstance(obj, str):
st = obj.strip()
if (st.startswith("{") and st.endswith("}")) or (st.startswith("[") and st.endswith("]")):
try:
obj = json.loads(st)
continue
except Exception:
break
break
return obj
class OpenAIAdapter(ProviderAdapter): # [OpenAIAdapter.__init__()](agentui/providers/adapters/openai.py:39)
name = "openai"
# --- HTTP defaults ---
def default_base_url(self) -> str:
return "https://api.openai.com"
def default_endpoint(self, model: str) -> str:
return "/v1/chat/completions"
# --- PROMPT: building provider structures ---
def blocks_struct_for_template(
self,
unified_messages: List[Dict[str, Any]],
context: Dict[str, Any],
node_config: Dict[str, Any],
) -> Dict[str, Any]:
"""
Compatible with the provider=='openai' branch of [ProviderCallNode._blocks_struct_for_template()](agentui/pipeline/executor.py:1958).
"""
def _map(m: Dict[str, Any]) -> Dict[str, Any]:
c = m.get("content")
if isinstance(c, list):
parts = []
for p in c:
if isinstance(p, dict) and p.get("type") == "text":
parts.append({"type": "text", "text": str(p.get("text") or "")})
elif isinstance(p, dict) and p.get("type") in {"image_url", "image"}:
url = str(p.get("url") or "")
parts.append({"type": "image_url", "image_url": {"url": url}})
return {"role": m.get("role", "user"), "content": parts}
return {"role": m.get("role", "user"), "content": str(c or "")}
# system_text is the concatenation of all system blocks (text only, no images)
sys_text = "\n\n".join(
[
str(m.get("content") or "")
if not isinstance(m.get("content"), list)
else "\n".join(
[str(p.get("text") or "") for p in m.get("content") if isinstance(p, dict) and p.get("type") == "text"]
)
for m in (unified_messages or [])
if m.get("role") == "system"
]
).strip()
return {
"messages": [_map(m) for m in (unified_messages or [])],
"system_text": sys_text,
}
def normalize_segment(self, x: Any) -> List[Dict[str, Any]]:
"""
Compatible with [_as_openai_messages()](agentui/pipeline/executor.py:2451).
- Supports a dict with messages (openai)
- Supports dict/list in Gemini.contents style (joins the text parts)
- Supports a list of openai-like messages (normalizes parts)
- Strings and anything else are wrapped as a single user message
"""
msgs: List[Dict[str, Any]] = []
try:
# Dict inputs
if isinstance(x, dict):
if isinstance(x.get("messages"), list):
return list(x.get("messages") or [])
if isinstance(x.get("contents"), list):
# Gemini -> OpenAI (text-only join)
for c in (x.get("contents") or []):
if not isinstance(c, dict):
continue
role_raw = str(c.get("role") or "user")
role = "assistant" if role_raw == "model" else ("user" if role_raw not in {"user", "assistant"} else role_raw)
parts = c.get("parts") or []
text = "\n".join(
[str(p.get("text")) for p in parts if isinstance(p, dict) and isinstance(p.get("text"), str)]
).strip()
msgs.append({"role": role, "content": text})
return msgs
# List inputs
if isinstance(x, list):
# Gemini contents list -> OpenAI messages
if all(isinstance(c, dict) and "parts" in c for c in x):
for c in x:
role_raw = str(c.get("role") or "user")
role = "assistant" if role_raw == "model" else ("user" if role_raw not in {"user", "assistant"} else role_raw)
parts = c.get("parts") or []
text = "\n".join(
[str(p.get("text")) for p in parts if isinstance(p, dict) and isinstance(p.get("text"), str)]
).strip()
msgs.append({"role": role, "content": text})
return msgs
# OpenAI messages list already — normalize parts if needed
if all(isinstance(m, dict) and "content" in m for m in x):
out: List[Dict[str, Any]] = []
for m in x:
role = m.get("role", "user")
cont = m.get("content")
if isinstance(cont, str):
out.append({"role": role, "content": cont})
elif isinstance(cont, list):
parts2: List[Dict[str, Any]] = []
for p in cont:
if not isinstance(p, dict):
continue
if p.get("type") == "text":
parts2.append({"type": "text", "text": str(p.get("text") or "")})
elif p.get("type") in {"image_url", "image"}:
url = ""
if isinstance(p.get("image_url"), dict):
url = str((p.get("image_url") or {}).get("url") or "")
elif "url" in p:
url = str(p.get("url") or "")
if url:
parts2.append({"type": "image_url", "image_url": {"url": url}})
out.append({"role": role, "content": parts2 if parts2 else ""})
return out
# Fallback: dump JSON as a single user message
return [{"role": "user", "content": json.dumps(x, ensure_ascii=False)}]
# Primitive inputs or embedded JSON string
if isinstance(x, str):
try_obj = _try_json(x)
if try_obj is not None:
return self.normalize_segment(try_obj)
return [{"role": "user", "content": x}]
return [{"role": "user", "content": json.dumps(x, ensure_ascii=False)}]
except Exception:
return [{"role": "user", "content": str(x)}]
def filter_items(self, arr: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""
Compatible with [_filter_openai()](agentui/pipeline/executor.py:2801).
"""
out: List[Dict[str, Any]] = []
for m in (arr or []):
if not isinstance(m, dict):
continue
c = m.get("content")
if isinstance(c, str) and c.strip():
out.append({"role": m.get("role", "user"), "content": c})
elif isinstance(c, list):
parts = []
for p in c:
if isinstance(p, dict) and p.get("type") == "text":
txt = str(p.get("text") or "")
if txt.strip():
parts.append({"type": "text", "text": txt})
if parts:
out.append({"role": m.get("role", "user"), "content": parts})
return out
def extract_system_text_from_obj(self, x: Any, render_ctx: Dict[str, Any]) -> Optional[str]:
"""
Compatible with [_extract_sys_text_from_obj()](agentui/pipeline/executor.py:2676).
Can read:
- Gemini: systemInstruction.parts[].text
- Claude: top-level system (string/list of blocks)
- OpenAI: messages[*] with role=system (string content or parts[].text)
- List formats: openai messages list and gemini contents list (in the latter case it looks at incoming.json.systemInstruction)
"""
try:
# Dict objects
if isinstance(x, dict):
# Gemini systemInstruction
if "systemInstruction" in x:
si = x.get("systemInstruction")
def _parts_to_text(siobj: Any) -> str:
try:
parts = siobj.get("parts") or []
texts = [
str(p.get("text") or "")
for p in parts
if isinstance(p, dict) and isinstance(p.get("text"), str) and p.get("text").strip()
]
return "\n".join([t for t in texts if t]).strip()
except Exception:
return ""
if isinstance(si, dict):
t = _parts_to_text(si)
if t:
return t
if isinstance(si, list):
texts = []
for p in si:
if isinstance(p, dict) and isinstance(p.get("text"), str) and p.get("text").strip():
texts.append(p.get("text").strip())
t = "\n".join(texts).strip()
if t:
return t
if isinstance(si, str) and si.strip():
return si.strip()
# Claude system (string or blocks)
if "system" in x and not ("messages" in x and isinstance(x.get("messages"), list)):
sysv = x.get("system")
if isinstance(sysv, str) and sysv.strip():
return sysv.strip()
if isinstance(sysv, list):
texts = [
str(b.get("text") or "")
for b in sysv
if isinstance(b, dict) and (b.get("type") == "text") and isinstance(b.get("text"), str) and b.get("text").strip()
]
t = "\n".join([t for t in texts if t]).strip()
if t:
return t
# OpenAI messages with role=system
if isinstance(x.get("messages"), list):
sys_msgs = []
for m in (x.get("messages") or []):
try:
if (str(m.get("role") or "").lower().strip() == "system"):
cont = m.get("content")
if isinstance(cont, str) and cont.strip():
sys_msgs.append(cont.strip())
elif isinstance(cont, list):
for p in cont:
if (
isinstance(p, dict)
and p.get("type") == "text"
and isinstance(p.get("text"), str)
and p.get("text").strip()
):
sys_msgs.append(p.get("text").strip())
except Exception:
continue
if sys_msgs:
return "\n\n".join(sys_msgs).strip()
# List objects
if isinstance(x, list):
# OpenAI messages list with role=system
if all(isinstance(m, dict) and "role" in m for m in x):
sys_msgs = []
for m in x:
try:
if (str(m.get("role") or "").lower().strip() == "system"):
cont = m.get("content")
if isinstance(cont, str) and cont.strip():
sys_msgs.append(cont.strip())
elif isinstance(cont, list):
for p in cont:
if (
isinstance(p, dict)
and p.get("type") == "text"
and isinstance(p.get("text"), str)
and p.get("text").strip()
):
sys_msgs.append(p.get("text").strip())
except Exception:
continue
if sys_msgs:
return "\n\n".join(sys_msgs).strip()
# Gemini 'contents' list: try to read systemInstruction from incoming JSON snapshot
if all(isinstance(c, dict) and "parts" in c for c in x):
try:
inc = (render_ctx.get("incoming") or {}).get("json") or {}
si = inc.get("systemInstruction")
if si is not None:
# Recurse into this same method
return self.extract_system_text_from_obj({"systemInstruction": si}, render_ctx)
except Exception:
pass
return None
except Exception:
return None
def combine_segments(
self,
blocks_struct: Dict[str, Any],
pre_segments_raw: List[Dict[str, Any]],
raw_segs: List[str],
render_ctx: Dict[str, Any],
pre_var_paths: set[str],
render_template_simple_fn,
var_macro_fullmatch_re,
detect_vendor_fn,
) -> Dict[str, Any]:
"""
Mirrors the provider=='openai' branch of prompt_combine in [ProviderCallNode.run()](agentui/pipeline/executor.py:2936).
"""
built: List[Dict[str, Any]] = []
sys_texts: List[str] = []
# 1) Pre-segments (prompt_preprocess)
for _pre in (pre_segments_raw or []):
try:
_obj = _pre.get("obj")
items = self.normalize_segment(_obj)
items = self.filter_items(items)
built = insert_items(built, items, _pre.get("pos"))
try:
sx = self.extract_system_text_from_obj(_obj, render_ctx)
if isinstance(sx, str) and sx.strip():
sys_texts.append(sx.strip())
except Exception:
pass
except Exception:
pass
# 2) Main segments (prompt_combine)
for raw_seg in (raw_segs or []):
body_seg, pos_spec = split_pos_spec(raw_seg)
if body_seg == "[[PROMPT]]":
items = self.filter_items(list(blocks_struct.get("messages", []) or []))
built = insert_items(built, items, pos_spec)
continue
# Hide duplicate plain [[VAR:path]] segments if this path was already inserted via pre_var_overrides
m_pre = var_macro_fullmatch_re.fullmatch(body_seg)
if m_pre:
_p = (m_pre.group(1) or "").strip()
try:
if _p in pre_var_paths:
# Already inserted via prompt_preprocess with filtering; skip
continue
except Exception:
pass
resolved = render_template_simple_fn(body_seg, render_ctx, render_ctx.get("OUT") or {})
obj = _try_json(resolved)
# debug provider guess
try:
pg = detect_vendor_fn(obj if isinstance(obj, dict) else {})
print(f"DEBUG: prompt_combine seg provider_guess={pg} -> target=openai pos={pos_spec}")
except Exception:
pass
items = self.normalize_segment(obj if obj is not None else resolved)
items = self.filter_items(items)
built = insert_items(built, items, pos_spec)
try:
sx = self.extract_system_text_from_obj(obj, render_ctx) if obj is not None else None
if isinstance(sx, str) and sx.strip():
sys_texts.append(sx.strip())
except Exception:
pass
# If nothing was collected, take the original blocks
if not built:
built = self.filter_items(list(blocks_struct.get("messages", []) or []))
# Prepend system messages gathered in sys_texts
if sys_texts:
sys_msgs = [{"role": "system", "content": s} for s in sys_texts if s]
if sys_msgs:
built = sys_msgs + built
# keep system_text for UI/debug
st0 = blocks_struct.get("system_text") or ""
st = "\n\n".join([t for t in [st0] + sys_texts if isinstance(t, str) and t.strip()])
return {"messages": built, "system_text": st}
def prompt_fragment(self, pm_struct: Dict[str, Any], node_config: Dict[str, Any]) -> str:
"""
Compatible with the provider=='openai' branch of [[PROMPT]] construction in [ProviderCallNode.run()](agentui/pipeline/executor.py:3103).
"""
return '"messages": ' + json.dumps(pm_struct.get("messages", []), ensure_ascii=False)

View File

@@ -0,0 +1,32 @@
from __future__ import annotations
from typing import Optional
from agentui.providers.adapters.base import ProviderAdapter, default_base_url_for as _default_base_url_for
from agentui.providers.adapters.openai import OpenAIAdapter
try:
from agentui.providers.adapters.gemini import GeminiAdapter, GeminiImageAdapter
except Exception:
GeminiAdapter = None # type: ignore
GeminiImageAdapter = None # type: ignore
try:
from agentui.providers.adapters.claude import ClaudeAdapter
except Exception:
ClaudeAdapter = None # type: ignore
def get_adapter(provider: str) -> Optional[ProviderAdapter]:
p = (provider or "").strip().lower()
if p == "openai":
return OpenAIAdapter()
if p == "gemini" and GeminiAdapter:
return GeminiAdapter() # type: ignore[operator]
if p == "gemini_image" and GeminiImageAdapter:
return GeminiImageAdapter() # type: ignore[operator]
if p == "claude" and ClaudeAdapter:
return ClaudeAdapter() # type: ignore[operator]
return None
def default_base_url_for(vendor: str) -> Optional[str]:
return _default_base_url_for(vendor)
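
Editor's note: a usage sketch for the registry, not part of the commit; the package path agentui.providers.adapters is assumed for this module:

from agentui.providers.adapters import get_adapter

a = get_adapter("  OpenAI ")       # lookup is case- and whitespace-insensitive
print(a.name)                      # -> openai
print(a.default_base_url())        # -> https://api.openai.com
print(get_adapter("unknown"))      # -> None; callers must handle the fallback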

View File

@@ -33,12 +33,25 @@ def build_client(timeout: float = 60.0) -> httpx.AsyncClient:
print("[agentui.http_client] proxies=", masked, " verify=", verify)
# httpx itself understands socks:// and socks5:// schemes when the [socks] extra is installed
client = httpx.AsyncClient(
timeout=timeout,
proxies=proxies,
follow_redirects=True,
verify=verify,
)
try:
client = httpx.AsyncClient(
timeout=timeout,
proxies=proxies,
follow_redirects=True,
verify=verify,
)
except TypeError:
if proxies:
try:
masked = {k: _mask_proxy(v) for k, v in proxies.items()}
except Exception:
masked = proxies
print(f"[agentui.http_client] WARNING: proxies not supported in httpx.AsyncClient, skipping proxies={masked}")
client = httpx.AsyncClient(
timeout=timeout,
follow_redirects=True,
verify=verify,
)
return client
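
Editor's note: the TypeError fallback above targets httpx releases where the proxies= keyword is gone (it was deprecated in favor of proxy=/mounts= around httpx 0.26 and removed later). On those versions proxy routing can be restored roughly like this (a sketch, not part of the commit):

import httpx

proxies = {"https://": "socks5://127.0.0.1:1080"}  # assumed example proxy map
mounts = {pattern: httpx.AsyncHTTPTransport(proxy=url) for pattern, url in proxies.items()}
client = httpx.AsyncClient(timeout=60.0, follow_redirects=True, mounts=mounts)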

File diff suppressed because it is too large.

Binary file not shown (added image, 72 KiB).

Binary file not shown (added image, 337 KiB).

Binary file not shown (added image, 65 KiB).

Binary file not shown (added image, 973 B).

Binary file not shown (added image, 3.1 KiB).

BIN
favicon_io_saya/favicon.ico Normal file (added image, 15 KiB)

BIN
favicon_io_saya/saya1.png Normal file (added image, 7.6 KiB)

View File

@@ -0,0 +1 @@
{"name":"","short_name":"","icons":[{"src":"/android-chrome-192x192.png","sizes":"192x192","type":"image/png"},{"src":"/android-chrome-512x512.png","sizes":"512x512","type":"image/png"}],"theme_color":"#ffffff","background_color":"#ffffff","display":"standalone"}

View File

@@ -6,147 +6,32 @@
"loop_max_iters": 1000,
"loop_time_budget_ms": 999999999999,
"clear_var_store": true,
"http_timeout_sec": 999,
"http_timeout_sec": 999.0,
"text_extract_strategy": "auto",
"text_extract_json_path": "",
"text_join_sep": "\n",
"text_extract_presets": [
{
"id": "pmfipb98aywtx6jepd5",
"name": "ввв",
"id": "pmfqonx6fvcubc09k4ep",
"name": "candidates.0.content.parts.1.inlineData.data",
"strategy": "jsonpath",
"json_path": "ввв",
"json_path": "candidates.0.content.parts.1.inlineData.data",
"join_sep": "\n"
},
{
"id": "pmfqrelw6wu9rutnzk1",
"name": "candidates.0.content.parts.1.inlineData",
"strategy": "jsonpath",
"json_path": "candidates.0.content.parts.1.inlineData",
"join_sep": "\n"
}
],
"nodes": [
{
"id": "n1",
"type": "RawForward",
"pos_x": 441,
"pos_y": 354,
"config": {
"passthrough_headers": true,
"extra_headers": "{}",
"_origId": "n1"
},
"in": {
"depends": "n5.done"
}
},
{
"id": "n2",
"type": "ProviderCall",
"pos_x": 652,
"pos_y": 46,
"config": {
"provider": "gemini",
"provider_configs": {
"openai": {
"base_url": "https://api.openai.com",
"endpoint": "/v1/chat/completions",
"headers": "{\"Authorization\":\"Bearer [[MyOpenAiKey]]\"}",
"template": "{\n \"model\": \"gpt-5-chat-latest\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": 500,\n \"presence_penalty\": {{ incoming.json.presence_penalty|default(0) }},\n \"frequency_penalty\": {{ incoming.json.frequency_penalty|default(0) }},\n \"stop\": {{ incoming.json.stop|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }}\n}"
},
"gemini": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent?key=[[VAR:incoming.api_keys.key]]",
"headers": "{}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"safetySettings\": {{ incoming.json.safetySettings|default([]) }},\n \"generationConfig\": {\n \"temperature\": {{ incoming.json.generationConfig.temperature|default(params.temperature|default(0.7)) }},\n \"topP\": {{ incoming.json.generationConfig.topP|default(params.top_p|default(1)) }},\n \"maxOutputTokens\": {{ incoming.json.generationConfig.maxOutputTokens|default(params.max_tokens|default(256)) }},\n \"stopSequences\": {{ incoming.json.generationConfig.stopSequences|default(params.stop|default([])) }},\n \"candidateCount\": {{ incoming.json.generationConfig.candidateCount|default(1) }},\n \"thinkingConfig\": {\n \"includeThoughts\": {{ incoming.json.generationConfig.thinkingConfig.includeThoughts|default(false) }},\n \"thinkingBudget\": {{ incoming.json.generationConfig.thinkingConfig.thinkingBudget|default(0) }}\n }\n }\n}"
},
"claude": {
"base_url": "https://proxy.malepreg.lol/proxy/aws/claude",
"endpoint": "/v1/messages",
"headers": "{\"x-api-key\":\"[[Clod]]\",\"anthropic-version\":\"2023-06-01\",\"anthropic-beta\":\"output-128k-2025-02-19\"}",
"template": "{\n \"model\": \"claude-opus-4-20250514\",\n [[PROMPT]],\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"stop_sequences\": {{ incoming.json.stop_sequences|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }},\n \"thinking\": {\n \"type\": \"{{ incoming.json.thinking.type|default('enabled') }}\",\n \"budget_tokens\": {{ incoming.json.thinking.budget_tokens|default(3000) }}\n }\n}"
}
},
"blocks": [
{
"id": "bmfmstojw",
"name": "Great assustant",
"role": "system",
"prompt": "You are Great assustant",
"enabled": true,
"order": 0
},
{
"id": "bmfchnynm",
"name": "Сделай [[OUT1]] красивее",
"role": "user",
"prompt": "Сделай [[OUT1]] красивее",
"enabled": true,
"order": 1
}
],
"_origId": "n2"
},
"in": {
"depends": "n1.done"
}
},
{
"id": "n3",
"type": "ProviderCall",
"pos_x": 654,
"pos_y": 566,
"config": {
"provider": "openai",
"provider_configs": {
"openai": {
"base_url": "https://api.openai.com",
"endpoint": "/v1/chat/completions",
"headers": "{\"Authorization\":\"Bearer [[MyOpenAiKey]]\"}",
"template": "{\n \"model\": \"gpt-5-chat-latest\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": 500,\n \"presence_penalty\": {{ incoming.json.presence_penalty|default(0) }},\n \"frequency_penalty\": {{ incoming.json.frequency_penalty|default(0) }},\n \"stop\": {{ incoming.json.stop|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }}\n}"
},
"gemini": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent?key=[[VAR:incoming.api_keys.key]]",
"headers": "{}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"safetySettings\": {{ incoming.json.safetySettings|default([]) }},\n \"generationConfig\": {\n \"temperature\": {{ incoming.json.generationConfig.temperature|default(params.temperature|default(0.7)) }},\n \"topP\": {{ incoming.json.generationConfig.topP|default(params.top_p|default(1)) }},\n \"maxOutputTokens\": {{ incoming.json.generationConfig.maxOutputTokens|default(params.max_tokens|default(256)) }},\n \"stopSequences\": {{ incoming.json.generationConfig.stopSequences|default(params.stop|default([])) }},\n \"candidateCount\": {{ incoming.json.generationConfig.candidateCount|default(1) }},\n \"thinkingConfig\": {\n \"includeThoughts\": {{ incoming.json.generationConfig.thinkingConfig.includeThoughts|default(false) }},\n \"thinkingBudget\": {{ incoming.json.generationConfig.thinkingConfig.thinkingBudget|default(0) }}\n }\n }\n}"
},
"claude": {
"base_url": "https://api.anthropic.com",
"endpoint": "/v1/messages",
"headers": "{\"x-api-key\":\"[[VAR:incoming.headers.x-api-key]]\",\"anthropic-version\":\"2023-06-01\",\"anthropic-beta\":\"[[VAR:incoming.headers.anthropic-beta]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"stop_sequences\": {{ incoming.json.stop_sequences|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }},\n \"thinking\": {\n \"type\": \"{{ incoming.json.thinking.type|default('disabled') }}\",\n \"budget_tokens\": {{ incoming.json.thinking.budget_tokens|default(0) }}\n },\n \"anthropic_version\": \"{{ anthropic_version|default('2023-06-01') }}\"\n}"
}
},
"blocks": [
{
"id": "bmfchn1hq",
"name": "Сделай [[OUT1]] красивее",
"role": "user",
"prompt": "Сделай [[OUT1]] красивее",
"enabled": true,
"order": 0
}
],
"_origId": "n3"
},
"in": {
"depends": "n1.done"
}
},
{
"id": "n4",
"type": "Return",
"pos_x": 1193,
"pos_y": 314,
"config": {
"target_format": "auto",
"text_template": "[[OUT6]] [[Test]]",
"_origId": "n4"
},
"in": {
"depends": "n7.true"
}
},
{
"id": "n5",
"type": "SetVars",
"pos_x": 171,
"pos_y": 487,
"pos_x": 300,
"pos_y": 720,
"config": {
"variables": [
{
@@ -173,18 +58,51 @@
"in": {}
},
{
"id": "n6",
"type": "ProviderCall",
"pos_x": 923,
"pos_y": 345,
"id": "n2",
"type": "Return",
"pos_x": 1344,
"pos_y": 756,
"config": {
"provider": "openai",
"target_format": "auto",
"text_template": "[[OUT7]]",
"_origId": "n2"
},
"in": {
"depends": "n7.done"
}
},
{
"id": "n3",
"type": "RawForward",
"pos_x": 552,
"pos_y": 696,
"config": {
"passthrough_headers": true,
"extra_headers": "{\"connection\": \"close\"}",
"_origId": "n3",
"while_expr": "([[OUT3]] contains \"Stream failed to\") || ([[OUT3]] contains \"gemini-2.5-pro\") ) || [[WAS_ERROR]]",
"ignore_errors": true,
"while_max_iters": 50,
"override_path": "",
"base_url": ""
},
"in": {
"depends": "n5.done"
}
},
{
"id": "n4",
"type": "ProviderCall",
"pos_x": 780,
"pos_y": 672,
"config": {
"provider": "gemini",
"provider_configs": {
"openai": {
"base_url": "https://api.openai.com",
"endpoint": "/v1/chat/completions",
"headers": "{\"Authorization\":\"Bearer [[MyOpenAiKey]]\"}",
"template": "{\n \"model\": \"gpt-5-chat-latest\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": 500,\n \"presence_penalty\": {{ incoming.json.presence_penalty|default(0) }},\n \"frequency_penalty\": {{ incoming.json.frequency_penalty|default(0) }},\n \"stop\": {{ incoming.json.stop|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }}\n}"
"headers": "{\"Authorization\":\"[[VAR:incoming.headers.authorization]]\"}",
"template": "{\n \"model\": \"gpt-5-chat-latest\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_completion_tokens\": {{ incoming.json.max_completion_tokens|default(params.max_tokens|default(256)) }},\n \"presence_penalty\": {{ incoming.json.presence_penalty|default(0) }},\n \"frequency_penalty\": {{ incoming.json.frequency_penalty|default(0) }},\n \"stop\": {{ incoming.json.stop|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }}\n}"
},
"gemini": {
"base_url": "https://generativelanguage.googleapis.com",
@@ -192,6 +110,12 @@
"headers": "{}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"safetySettings\": {{ incoming.json.safetySettings|default([]) }},\n \"generationConfig\": {\n \"temperature\": {{ incoming.json.generationConfig.temperature|default(params.temperature|default(0.7)) }},\n \"topP\": {{ incoming.json.generationConfig.topP|default(params.top_p|default(1)) }},\n \"maxOutputTokens\": {{ incoming.json.generationConfig.maxOutputTokens|default(params.max_tokens|default(256)) }},\n \"stopSequences\": {{ incoming.json.generationConfig.stopSequences|default(params.stop|default([])) }},\n \"candidateCount\": {{ incoming.json.generationConfig.candidateCount|default(1) }},\n \"thinkingConfig\": {\n \"includeThoughts\": {{ incoming.json.generationConfig.thinkingConfig.includeThoughts|default(false) }},\n \"thinkingBudget\": {{ incoming.json.generationConfig.thinkingConfig.thinkingBudget|default(0) }}\n }\n }\n}"
},
"gemini_image": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent",
"headers": "{\"x-goog-api-key\":\"[[VAR:incoming.api_keys.key]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]]\n}"
},
"claude": {
"base_url": "https://api.anthropic.com",
"endpoint": "/v1/messages",
@@ -201,59 +125,78 @@
},
"blocks": [
{
"id": "bmfmk7g4a",
"name": "New Block",
"role": "system",
"prompt": "",
"id": "bmfwy94ev",
"name": "Твой ответ недостаточно хорош",
"role": "user",
"prompt": "Твой ответ:\n```\n[[OUT3]]\n```\nнедостаточно хорош, при его написании ты не следовал инструкциям. переделай исходя из инструкций, найди недостатки разобрав каждое действие оценив его логичность и следование истории от 0до10, перепиши эти моменты на нормальные.",
"enabled": true,
"order": 0
},
{
"id": "bmfdyczbd",
"name": "Объедени [[OUT3]], [[OUT4]] сделай более красиво.",
"role": "user",
"prompt": "Объедени [ [[OUT3]], [[OUT2]] ] сделай более красиво. напиши слово \"Красиво\" в конце.",
"enabled": true,
"order": 1
},
{
"id": "bmfh98jkh",
"name": "New Block1",
"role": "system",
"prompt": "1",
"enabled": true,
"order": 2
},
{
"id": "bmfmk74yz",
"name": "New Block",
"role": "assistant",
"prompt": "fuf",
"enabled": true,
"order": 3
}
],
"_origId": "n6"
"_origId": "n4",
"prompt_preprocess": "[[VAR:incoming.json.contents]] delKeyContains \"Okie!\"",
"prompt_combine": "[[VAR:incoming.json.contents]] & [[PROMPT]]@pos=append",
"while_expr": "([[OUT4]] contains \"Stream failed to\") || ([[OUT4]] contains \"gemini-2.5-pro\") || ([[WAS_ERROR]] == true)",
"ignore_errors": true,
"while_max_iters": 50,
"sleep_ms": 555555000
},
"in": {
"depends": [
"n2.done",
"n3.done",
"n7.false"
]
"depends": "n3.done"
}
},
{
"id": "n7",
"type": "If",
"pos_x": 1311,
"pos_y": 566,
"type": "ProviderCall",
"pos_x": 1080,
"pos_y": 600,
"config": {
"expr": "[[OUT6]] contains \"Красиво\"",
"_origId": "n7"
"provider": "gemini",
"provider_configs": {
"openai": {
"base_url": "https://api.openai.com",
"endpoint": "/v1/chat/completions",
"headers": "{\"Authorization\":\"Bearer [[MyOpenAiKey]]\"}",
"template": "{\n \"model\": \"gpt-5-chat-latest\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_completion_tokens\": {{ incoming.json.max_completion_tokens|default(params.max_tokens|default(256)) }},\n \"presence_penalty\": {{ incoming.json.presence_penalty|default(0) }},\n \"frequency_penalty\": {{ incoming.json.frequency_penalty|default(0) }},\n \"stop\": {{ incoming.json.stop|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }}\n}"
},
"gemini": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent?key=[[VAR:incoming.api_keys.key]]",
"headers": "{}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"safetySettings\": {{ incoming.json.safetySettings|default([]) }},\n \"generationConfig\": {\n \"temperature\": {{ incoming.json.generationConfig.temperature|default(params.temperature|default(0.7)) }},\n \"topP\": {{ incoming.json.generationConfig.topP|default(params.top_p|default(1)) }},\n \"maxOutputTokens\": {{ incoming.json.generationConfig.maxOutputTokens|default(params.max_tokens|default(256)) }},\n \"stopSequences\": {{ incoming.json.generationConfig.stopSequences|default(params.stop|default([])) }},\n \"candidateCount\": {{ incoming.json.generationConfig.candidateCount|default(1) }},\n \"thinkingConfig\": {\n \"includeThoughts\": {{ incoming.json.generationConfig.thinkingConfig.includeThoughts|default(false) }},\n \"thinkingBudget\": {{ incoming.json.generationConfig.thinkingConfig.thinkingBudget|default(0) }}\n }\n }\n}"
},
"gemini_image": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent",
"headers": "{\"x-goog-api-key\":\"[[VAR:incoming.api_keys.key]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]]\n}"
},
"claude": {
"base_url": "https://proxy.malepreg.lol/proxy/aws/claude",
"endpoint": "/v1/messages",
"headers": "{\"x-api-key\":\"igrovik\",\"anthropic-version\":\"2023-06-01\",\"anthropic-beta\":\"[[VAR:incoming.headers.anthropic-beta]]\"}",
"template": "{\n \"model\": \"claude-opus-4-20250514\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"stop_sequences\": {{ incoming.json.stop_sequences|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }},\n \"thinking\": {\n \"type\": \"{{ incoming.json.thinking.type|default('disabled') }}\",\n \"budget_tokens\": {{ incoming.json.thinking.budget_tokens|default(0) }}\n },\n \"anthropic_version\": \"{{ anthropic_version|default('2023-06-01') }}\"\n}"
}
},
"blocks": [
{
"id": "bmfzvzpl7",
"name": "Может содержать такие конструкции",
"role": "user",
"prompt": "Твой ответ:\n```\n[[OUT4]]\n```\nМожет содержать такие конструкции:\n**'Not X, but Y'** narrative structure. This includes any and all variations of stating what something *is not* in order to emphasize what it *is*. Нужно заменить места на нормальный нарратив.",
"enabled": true,
"order": 0
}
],
"_origId": "n7",
"prompt_combine": "[[VAR:incoming.json.contents]] & [[PROMPT]]@pos=-1",
"claude_no_system": true,
"while_expr": "([[OUT7]] contains \"Stream failed to\") || ([[OUT7]] contains \"gemini-2.5-pro\") || [[WAS_ERROR]] == true",
"ignore_errors": true,
"while_max_iters": 50
},
"in": {
"depends": "n6.done"
"depends": "n4.done"
}
}
]

237
presets/123123123.json Normal file
View File

@@ -0,0 +1,237 @@
{
"id": "pipeline_editor",
"name": "Edited Pipeline",
"parallel_limit": 8,
"loop_mode": "iterative",
"loop_max_iters": 1000,
"loop_time_budget_ms": 999999999999,
"clear_var_store": true,
"http_timeout_sec": 999,
"text_extract_strategy": "auto",
"text_extract_json_path": "",
"text_join_sep": "\n",
"text_extract_presets": [
{
"id": "pmfqonx6fvcubc09k4ep",
"name": "candidates.0.content.parts.1.inlineData.data",
"strategy": "jsonpath",
"json_path": "candidates.0.content.parts.1.inlineData.data",
"join_sep": "\n"
},
{
"id": "pmfqrelw6wu9rutnzk1",
"name": "candidates.0.content.parts.1.inlineData",
"strategy": "jsonpath",
"json_path": "candidates.0.content.parts.1.inlineData",
"join_sep": "\n"
}
],
"nodes": [
{
"id": "n5",
"type": "SetVars",
"pos_x": 12,
"pos_y": 780,
"config": {
"variables": [
{
"id": "vmfi99ftc",
"name": "Clod",
"mode": "string",
"value": "igrovik"
},
{
"id": "vmfi99gjw",
"name": "MyOpenAiKey",
"mode": "string",
"value": "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"
},
{
"id": "vmfjkn09i",
"name": "NAMETest",
"mode": "expr",
"value": "128 + 64"
}
],
"_origId": "n5"
},
"in": {}
},
{
"id": "n2",
"type": "Return",
"pos_x": 1344,
"pos_y": 756,
"config": {
"target_format": "auto",
"text_template": "[[OUT7]]",
"_origId": "n2"
},
"in": {
"depends": "n8.false"
}
},
{
"id": "n3",
"type": "RawForward",
"pos_x": 564,
"pos_y": 660,
"config": {
"passthrough_headers": true,
"extra_headers": "{\"connection\": \"close\"}",
"_origId": "n3"
},
"in": {
"depends": [
"n5.done",
"n1.true"
]
}
},
{
"id": "n1",
"type": "If",
"pos_x": 564,
"pos_y": 888,
"config": {
"expr": "([[OUT3]] contains \"Stream&#32;failed&#32;to\") || ([[OUT3]] contains \"gemini-2.5-pro\")",
"_origId": "n1"
},
"in": {
"depends": "n3.done"
}
},
{
"id": "n4",
"type": "ProviderCall",
"pos_x": 792,
"pos_y": 624,
"config": {
"provider": "gemini",
"provider_configs": {
"openai": {
"base_url": "https://api.openai.com",
"endpoint": "/v1/chat/completions",
"headers": "{\"Authorization\":\"Bearer [[VAR:incoming.headers.authorization]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"max_completion_tokens\": {{ incoming.json.max_completion_tokens|default(params.max_tokens|default(256)) }},\n \"presence_penalty\": {{ incoming.json.presence_penalty|default(0) }},\n \"frequency_penalty\": {{ incoming.json.frequency_penalty|default(0) }},\n \"stop\": {{ incoming.json.stop|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }}\n}"
},
"gemini": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent?key=[[VAR:incoming.api_keys.key]]",
"headers": "{}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"safetySettings\": {{ incoming.json.safetySettings|default([]) }},\n \"generationConfig\": {\n \"temperature\": {{ incoming.json.generationConfig.temperature|default(params.temperature|default(0.7)) }},\n \"topP\": {{ incoming.json.generationConfig.topP|default(params.top_p|default(1)) }},\n \"maxOutputTokens\": {{ incoming.json.generationConfig.maxOutputTokens|default(params.max_tokens|default(256)) }},\n \"stopSequences\": {{ incoming.json.generationConfig.stopSequences|default(params.stop|default([])) }},\n \"candidateCount\": {{ incoming.json.generationConfig.candidateCount|default(1) }},\n \"thinkingConfig\": {\n \"includeThoughts\": {{ incoming.json.generationConfig.thinkingConfig.includeThoughts|default(false) }},\n \"thinkingBudget\": {{ incoming.json.generationConfig.thinkingConfig.thinkingBudget|default(0) }}\n }\n }\n}"
},
"gemini_image": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent",
"headers": "{\"x-goog-api-key\":\"[[VAR:incoming.api_keys.key]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]]\n}"
},
"claude": {
"base_url": "https://api.anthropic.com",
"endpoint": "/v1/messages",
"headers": "{\"x-api-key\":\"[[VAR:incoming.headers.x-api-key]]\",\"anthropic-version\":\"2023-06-01\",\"anthropic-beta\":\"[[VAR:incoming.headers.anthropic-beta]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"stop_sequences\": {{ incoming.json.stop_sequences|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }},\n \"thinking\": {\n \"type\": \"{{ incoming.json.thinking.type|default('disabled') }}\",\n \"budget_tokens\": {{ incoming.json.thinking.budget_tokens|default(0) }}\n },\n \"anthropic_version\": \"{{ anthropic_version|default('2023-06-01') }}\"\n}"
}
},
"blocks": [
{
"id": "bmfwy94ev",
"name": "Твой ответ недостаточно хорош",
"role": "user",
"prompt": "Твой ответ:\n```\n[[OUT3]]\n```\nнедостаточно хорош, при его написании ты не следовал инструкциям. переделай исходя из инструкций, найди недостатк1.",
"enabled": true,
"order": 0
}
],
"_origId": "n4",
"prompt_combine": "[[VAR:incoming.json.contents]] & [[PROMPT]]@pos=-1"
},
"in": {
"depends": [
"n6.true",
"n1.false"
]
}
},
{
"id": "n6",
"type": "If",
"pos_x": 792,
"pos_y": 876,
"config": {
"expr": "([[OUT4]] contains \"Stream&#32;failed&#32;to\") || ([[OUT4]] contains \"gemini-2.5-pro\")",
"_origId": "n6"
},
"in": {
"depends": "n4.done"
}
},
{
"id": "n7",
"type": "ProviderCall",
"pos_x": 1068,
"pos_y": 540,
"config": {
"provider": "gemini",
"provider_configs": {
"openai": {
"base_url": "https://api.openai.com",
"endpoint": "/v1/chat/completions",
"headers": "{\"Authorization\":\"Bearer [[VAR:incoming.headers.authorization]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"max_completion_tokens\": {{ incoming.json.max_completion_tokens|default(params.max_tokens|default(256)) }},\n \"presence_penalty\": {{ incoming.json.presence_penalty|default(0) }},\n \"frequency_penalty\": {{ incoming.json.frequency_penalty|default(0) }},\n \"stop\": {{ incoming.json.stop|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }}\n}"
},
"gemini": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent?key=[[VAR:incoming.api_keys.key]]",
"headers": "{}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"safetySettings\": {{ incoming.json.safetySettings|default([]) }},\n \"generationConfig\": {\n \"temperature\": {{ incoming.json.generationConfig.temperature|default(params.temperature|default(0.7)) }},\n \"topP\": {{ incoming.json.generationConfig.topP|default(params.top_p|default(1)) }},\n \"maxOutputTokens\": {{ incoming.json.generationConfig.maxOutputTokens|default(params.max_tokens|default(256)) }},\n \"stopSequences\": {{ incoming.json.generationConfig.stopSequences|default(params.stop|default([])) }},\n \"candidateCount\": {{ incoming.json.generationConfig.candidateCount|default(1) }},\n \"thinkingConfig\": {\n \"includeThoughts\": {{ incoming.json.generationConfig.thinkingConfig.includeThoughts|default(false) }},\n \"thinkingBudget\": {{ incoming.json.generationConfig.thinkingConfig.thinkingBudget|default(0) }}\n }\n }\n}"
},
"gemini_image": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent",
"headers": "{\"x-goog-api-key\":\"[[VAR:incoming.api_keys.key]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]]\n}"
},
"claude": {
"base_url": "https://api.anthropic.com",
"endpoint": "/v1/messages",
"headers": "{\"x-api-key\":\"[[VAR:incoming.headers.x-api-key]]\",\"anthropic-version\":\"2023-06-01\",\"anthropic-beta\":\"[[VAR:incoming.headers.anthropic-beta]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"stop_sequences\": {{ incoming.json.stop_sequences|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }},\n \"thinking\": {\n \"type\": \"{{ incoming.json.thinking.type|default('disabled') }}\",\n \"budget_tokens\": {{ incoming.json.thinking.budget_tokens|default(0) }}\n },\n \"anthropic_version\": \"{{ anthropic_version|default('2023-06-01') }}\"\n}"
}
},
"blocks": [
{
"id": "bmfzvzpl7",
"name": "Может содержать такие конструкции",
"role": "user",
"prompt": "Твой ответ:\n```\n[[OUT4]]\n```\nМожет содержать такие конструкции:\n**'Not X, but Y'** narrative structure. This includes any and all variations of stating what something *is not* in order to emphasize what it *is*. Нужно заменить места на нормальный нарратив.",
"enabled": true,
"order": 0
}
],
"_origId": "n7",
"prompt_combine": "[[VAR:incoming.json.contents]] & [[PROMPT]]@pos=-1"
},
"in": {
"depends": [
"n6.false",
"n8.true"
]
}
},
{
"id": "n8",
"type": "If",
"pos_x": 1068,
"pos_y": 876,
"config": {
"expr": "([[OUT7]] contains \"Stream failed to\") || ([[OUT7]] contains \"gemini-2.5-pro\")",
"_origId": "n8"
},
"in": {
"depends": "n7.done"
}
}
]
}

191
presets/imgtests.json Normal file
View File

@@ -0,0 +1,191 @@
{
"id": "pipeline_editor",
"name": "Edited Pipeline",
"parallel_limit": 8,
"loop_mode": "iterative",
"loop_max_iters": 1000,
"loop_time_budget_ms": 999999999999,
"clear_var_store": true,
"http_timeout_sec": 999,
"text_extract_strategy": "auto",
"text_extract_json_path": "",
"text_join_sep": "\n",
"text_extract_presets": [
{
"id": "pmfqonx6fvcubc09k4ep",
"name": "candidates.0.content.parts.1.inlineData.data",
"strategy": "jsonpath",
"json_path": "candidates.0.content.parts.1.inlineData.data",
"join_sep": "\n"
},
{
"id": "pmfqrelw6wu9rutnzk1",
"name": "candidates.0.content.parts.1.inlineData",
"strategy": "jsonpath",
"json_path": "candidates.0.content.parts.1.inlineData",
"join_sep": "\n"
}
],
"nodes": [
{
"id": "n5",
"type": "SetVars",
"pos_x": -603,
"pos_y": 637,
"config": {
"variables": [
{
"id": "vmfi99ftc",
"name": "Clod",
"mode": "string",
"value": "igrovik"
},
{
"id": "vmfi99gjw",
"name": "MyOpenAiKey",
"mode": "string",
"value": "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"
},
{
"id": "vmfjkn09i",
"name": "NAMETest",
"mode": "expr",
"value": "128 + 64"
}
],
"_origId": "n5"
},
"in": {}
},
{
"id": "n2",
"type": "Return",
"pos_x": 509,
"pos_y": 459,
"config": {
"target_format": "auto",
"text_template": "[[OUT3]]",
"_origId": "n2"
},
"in": {
"depends": "n1.false"
}
},
{
"id": "n3",
"type": "RawForward",
"pos_x": 45,
"pos_y": 750,
"config": {
"passthrough_headers": true,
"extra_headers": "{\"connection\": \"close\"}",
"_origId": "n3"
},
"in": {
"depends": "n1.true"
}
},
{
"id": "n1",
"type": "If",
"pos_x": 344,
"pos_y": 730,
"config": {
"expr": "([[OUT3]] contains \"Stream&#32;failed&#32;to\") || ([[OUT3]] contains \"gemini-2.5-pro\")",
"_origId": "n1"
},
"in": {
"depends": "n3.done"
}
},
{
"id": "n4",
"type": "ProviderCall",
"pos_x": -185.88888888888889,
"pos_y": 523,
"config": {
"provider": "gemini_image",
"provider_configs": {
"openai": {
"base_url": "https://api.openai.com",
"endpoint": "/v1/chat/completions",
"headers": "{\"Authorization\":\"Bearer [[VAR:incoming.headers.authorization]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"max_completion_tokens\": {{ incoming.json.max_completion_tokens|default(params.max_tokens|default(256)) }},\n \"presence_penalty\": {{ incoming.json.presence_penalty|default(0) }},\n \"frequency_penalty\": {{ incoming.json.frequency_penalty|default(0) }},\n \"stop\": {{ incoming.json.stop|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }}\n}"
},
"gemini": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent?key=[[VAR:incoming.api_keys.key]]",
"headers": "{}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"safetySettings\": {{ incoming.json.safetySettings|default([]) }},\n \"generationConfig\": {\n \"temperature\": {{ incoming.json.generationConfig.temperature|default(params.temperature|default(0.7)) }},\n \"topP\": {{ incoming.json.generationConfig.topP|default(params.top_p|default(1)) }},\n \"maxOutputTokens\": {{ incoming.json.generationConfig.maxOutputTokens|default(params.max_tokens|default(256)) }},\n \"stopSequences\": {{ incoming.json.generationConfig.stopSequences|default(params.stop|default([])) }},\n \"candidateCount\": {{ incoming.json.generationConfig.candidateCount|default(1) }},\n \"thinkingConfig\": {\n \"includeThoughts\": {{ incoming.json.generationConfig.thinkingConfig.includeThoughts|default(false) }},\n \"thinkingBudget\": {{ incoming.json.generationConfig.thinkingConfig.thinkingBudget|default(0) }}\n }\n }\n}"
},
"gemini_image": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/gemini-2.5-flash-image-preview:generateContent",
"headers": "{\"x-goog-api-key\":\"[[VAR:incoming.api_keys.key]]\"}",
"template": "{\n \"model\": \"gemini-2.5-flash-image-preview\",\n [[OUT3]]\n}"
},
"claude": {
"base_url": "https://api.anthropic.com",
"endpoint": "/v1/messages",
"headers": "{\"x-api-key\":\"[[VAR:incoming.headers.x-api-key]]\",\"anthropic-version\":\"2023-06-01\",\"anthropic-beta\":\"[[VAR:incoming.headers.anthropic-beta]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"stop_sequences\": {{ incoming.json.stop_sequences|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }},\n \"thinking\": {\n \"type\": \"{{ incoming.json.thinking.type|default('disabled') }}\",\n \"budget_tokens\": {{ incoming.json.thinking.budget_tokens|default(0) }}\n },\n \"anthropic_version\": \"{{ anthropic_version|default('2023-06-01') }}\"\n}"
}
},
"blocks": [],
"_origId": "n4"
},
"in": {
"depends": "n6.done"
}
},
{
"id": "n6",
"type": "ProviderCall",
"pos_x": -391,
"pos_y": 648,
"config": {
"provider": "gemini",
"provider_configs": {
"openai": {
"base_url": "https://api.openai.com",
"endpoint": "/v1/chat/completions",
"headers": "{\"Authorization\":\"Bearer [[VAR:incoming.headers.authorization]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"max_completion_tokens\": {{ incoming.json.max_completion_tokens|default(params.max_tokens|default(256)) }},\n \"presence_penalty\": {{ incoming.json.presence_penalty|default(0) }},\n \"frequency_penalty\": {{ incoming.json.frequency_penalty|default(0) }},\n \"stop\": {{ incoming.json.stop|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }}\n}"
},
"gemini": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent?key=[[VAR:incoming.api_keys.key]]",
"headers": "{}",
"template": "{\n \"model\": \"{{ model }}\",\n [[VAR:incoming.json.contents]],\n [[PROMPT]],\n \"safetySettings\": {{ incoming.json.safetySettings|default([]) }},\n \"generationConfig\": {\n \"temperature\": {{ incoming.json.generationConfig.temperature|default(params.temperature|default(0.7)) }},\n \"topP\": {{ incoming.json.generationConfig.topP|default(params.top_p|default(1)) }},\n \"maxOutputTokens\": {{ incoming.json.generationConfig.maxOutputTokens|default(params.max_tokens|default(256)) }},\n \"stopSequences\": {{ incoming.json.generationConfig.stopSequences|default(params.stop|default([])) }},\n \"candidateCount\": {{ incoming.json.generationConfig.candidateCount|default(1) }},\n \"thinkingConfig\": {\n \"includeThoughts\": {{ incoming.json.generationConfig.thinkingConfig.includeThoughts|default(false) }},\n \"thinkingBudget\": {{ incoming.json.generationConfig.thinkingConfig.thinkingBudget|default(0) }}\n }\n }\n}"
},
"gemini_image": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent",
"headers": "{\"x-goog-api-key\":\"[[VAR:incoming.api_keys.key]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]]\n}"
},
"claude": {
"base_url": "https://api.anthropic.com",
"endpoint": "/v1/messages",
"headers": "{\"x-api-key\":\"[[VAR:incoming.headers.x-api-key]]\",\"anthropic-version\":\"2023-06-01\",\"anthropic-beta\":\"[[VAR:incoming.headers.anthropic-beta]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"stop_sequences\": {{ incoming.json.stop_sequences|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }},\n \"thinking\": {\n \"type\": \"{{ incoming.json.thinking.type|default('disabled') }}\",\n \"budget_tokens\": {{ incoming.json.thinking.budget_tokens|default(0) }}\n },\n \"anthropic_version\": \"{{ anthropic_version|default('2023-06-01') }}\"\n}"
}
},
"blocks": [
{
"id": "bmfuw6ayo",
"name": "Создание промпта",
"role": "user",
"prompt": "Создай промпт для генерации изображения исходя из последнего действие {{user}}. Промпт должен быть лаконичный, простенький, без сложных формулировок. В ответе не пиши ничего кроме промпта.",
"enabled": true,
"order": 0
}
],
"_origId": "n6"
},
"in": {
"depends": "n5.done"
}
}
]
}
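
The two text_extract_presets in imgtests.json pull the inline image out of a Gemini generateContent response, where part 0 is typically the text part and part 1 the image. A rough illustration of what the dotted path resolves to, assuming the standard response shape (resolve_path is illustrative, not the project's extractor):

    import base64

    def resolve_path(doc, dotted: str):
        # Walk "candidates.0.content.parts.1.inlineData.data" one segment
        # at a time; numeric segments index into lists.
        cur = doc
        for seg in dotted.split("."):
            cur = cur[int(seg)] if seg.isdigit() else cur[seg]
        return cur

    resp = {"candidates": [{"content": {"parts": [
        {"text": "here is your image"},
        {"inlineData": {"mimeType": "image/png", "data": base64.b64encode(b"PNG...").decode()}},
    ]}}]}
    png_bytes = base64.b64decode(resolve_path(resp, "candidates.0.content.parts.1.inlineData.data"))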

171
presets/prepprst.json Normal file
View File

@@ -0,0 +1,171 @@
{
"id": "pipeline_editor",
"name": "Edited Pipeline",
"parallel_limit": 8,
"loop_mode": "iterative",
"loop_max_iters": 1000,
"loop_time_budget_ms": 999999999999,
"clear_var_store": true,
"http_timeout_sec": 999,
"text_extract_strategy": "auto",
"text_extract_json_path": "",
"text_join_sep": "\n",
"text_extract_presets": [
{
"id": "pmfqonx6fvcubc09k4ep",
"name": "candidates.0.content.parts.1.inlineData.data",
"strategy": "jsonpath",
"json_path": "candidates.0.content.parts.1.inlineData.data",
"join_sep": "\n"
},
{
"id": "pmfqrelw6wu9rutnzk1",
"name": "candidates.0.content.parts.1.inlineData",
"strategy": "jsonpath",
"json_path": "candidates.0.content.parts.1.inlineData",
"join_sep": "\n"
}
],
"nodes": [
{
"id": "n5",
"type": "SetVars",
"pos_x": -125,
"pos_y": 561,
"config": {
"variables": [
{
"id": "vmfi99ftc",
"name": "Clod",
"mode": "string",
"value": "igrovik"
},
{
"id": "vmfi99gjw",
"name": "MyOpenAiKey",
"mode": "string",
"value": "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"
},
{
"id": "vmfjkn09i",
"name": "NAMETest",
"mode": "expr",
"value": "128 + 64"
}
],
"_origId": "n5"
},
"in": {}
},
{
"id": "n2",
"type": "Return",
"pos_x": 954,
"pos_y": 564,
"config": {
"target_format": "auto",
"text_template": "[[OUT4]]",
"_origId": "n2"
},
"in": {
"depends": "n6.false"
}
},
{
"id": "n3",
"type": "RawForward",
"pos_x": 74,
"pos_y": 450.5,
"config": {
"passthrough_headers": true,
"extra_headers": "{\"connection\": \"close\"}",
"_origId": "n3"
},
"in": {
"depends": [
"n5.done",
"n1.true"
]
}
},
{
"id": "n1",
"type": "If",
"pos_x": 75,
"pos_y": 909,
"config": {
"expr": "([[OUT3]] contains \"Stream&#32;failed&#32;to\") || ([[OUT3]] contains \"gemini-2.5-pro\")",
"_origId": "n1"
},
"in": {
"depends": "n3.done"
}
},
{
"id": "n4",
"type": "ProviderCall",
"pos_x": 663,
"pos_y": 335,
"config": {
"provider": "gemini",
"provider_configs": {
"openai": {
"base_url": "https://api.openai.com",
"endpoint": "/v1/chat/completions",
"headers": "{\"Authorization\":\"Bearer [[VAR:incoming.headers.authorization]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"max_completion_tokens\": {{ incoming.json.max_completion_tokens|default(params.max_tokens|default(256)) }},\n \"presence_penalty\": {{ incoming.json.presence_penalty|default(0) }},\n \"frequency_penalty\": {{ incoming.json.frequency_penalty|default(0) }},\n \"stop\": {{ incoming.json.stop|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }}\n}"
},
"gemini": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent?key=[[VAR:incoming.api_keys.key]]",
"headers": "{}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"safetySettings\": {{ incoming.json.safetySettings|default([]) }},\n \"generationConfig\": {\n \"temperature\": {{ incoming.json.generationConfig.temperature|default(params.temperature|default(0.7)) }},\n \"topP\": {{ incoming.json.generationConfig.topP|default(params.top_p|default(1)) }},\n \"maxOutputTokens\": {{ incoming.json.generationConfig.maxOutputTokens|default(params.max_tokens|default(256)) }},\n \"stopSequences\": {{ incoming.json.generationConfig.stopSequences|default(params.stop|default([])) }},\n \"candidateCount\": {{ incoming.json.generationConfig.candidateCount|default(1) }},\n \"thinkingConfig\": {\n \"includeThoughts\": {{ incoming.json.generationConfig.thinkingConfig.includeThoughts|default(false) }},\n \"thinkingBudget\": {{ incoming.json.generationConfig.thinkingConfig.thinkingBudget|default(0) }}\n }\n }\n}"
},
"gemini_image": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent",
"headers": "{\"x-goog-api-key\":\"[[VAR:incoming.api_keys.key]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]]\n}"
},
"claude": {
"base_url": "https://api.anthropic.com",
"endpoint": "/v1/messages",
"headers": "{\"x-api-key\":\"[[VAR:incoming.headers.x-api-key]]\",\"anthropic-version\":\"2023-06-01\",\"anthropic-beta\":\"[[VAR:incoming.headers.anthropic-beta]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"stop_sequences\": {{ incoming.json.stop_sequences|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }},\n \"thinking\": {\n \"type\": \"{{ incoming.json.thinking.type|default('disabled') }}\",\n \"budget_tokens\": {{ incoming.json.thinking.budget_tokens|default(0) }}\n },\n \"anthropic_version\": \"{{ anthropic_version|default('2023-06-01') }}\"\n}"
}
},
"blocks": [
{
"id": "bmfwy94ev",
"name": "Твой ответ недостаточно хорош",
"role": "user",
"prompt": "Твой ответ:\n```\n[[OUT3]]\n```\nнедостаточно хорош, при его написании ты не следовал инструкциям. переделай исходя из инструкций, найди недостатки.",
"enabled": true,
"order": 0
}
],
"_origId": "n4",
"prompt_combine": "[[VAR:incoming.json.contents]] & [[PROMPT]]@pos=-1"
},
"in": {
"depends": [
"n6.true",
"n1.false"
]
}
},
{
"id": "n6",
"type": "If",
"pos_x": 675,
"pos_y": 882.25,
"config": {
"expr": "([[OUT4]] contains \"Stream&#32;failed&#32;to\") || ([[OUT4]] contains \"gemini-2.5-pro\")",
"_origId": "n6"
},
"in": {
"depends": "n4.done"
}
}
]
}
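
The prompt_combine rule on n4 ("[[VAR:incoming.json.contents]] & [[PROMPT]]@pos=-1") appears to splice the node's critique block into the conversation that arrived with the request: the incoming Gemini-style contents array comes first, and @pos=-1 appends the block at the end. A hedged sketch of that merge, with the list semantics inferred from the @pos suffix rather than taken from the engine:

    def combine(contents: list, blocks: list, pos: int = -1) -> list:
        # pos=-1 appends after the last element; any other pos inserts before that index
        merged = list(contents)
        if pos == -1:
            merged.extend(blocks)
        else:
            merged[pos:pos] = blocks
        return merged

    history = [{"role": "user", "parts": [{"text": "original request"}]}]
    critique = [{"role": "user", "parts": [{"text": "your answer needs rework"}]}]
    assert combine(history, critique)[-1] == critique[0]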

105
presets/retry.json Normal file
View File

@@ -0,0 +1,105 @@
{
"id": "pipeline_editor",
"name": "Edited Pipeline",
"parallel_limit": 8,
"loop_mode": "iterative",
"loop_max_iters": 1000,
"loop_time_budget_ms": 999999999999,
"clear_var_store": true,
"http_timeout_sec": 999,
"text_extract_strategy": "auto",
"text_extract_json_path": "",
"text_join_sep": "\n",
"text_extract_presets": [
{
"id": "pmfqonx6fvcubc09k4ep",
"name": "candidates.0.content.parts.1.inlineData.data",
"strategy": "jsonpath",
"json_path": "candidates.0.content.parts.1.inlineData.data",
"join_sep": "\n"
},
{
"id": "pmfqrelw6wu9rutnzk1",
"name": "candidates.0.content.parts.1.inlineData",
"strategy": "jsonpath",
"json_path": "candidates.0.content.parts.1.inlineData",
"join_sep": "\n"
}
],
"nodes": [
{
"id": "n5",
"type": "SetVars",
"pos_x": -125,
"pos_y": 561,
"config": {
"variables": [
{
"id": "vmfi99ftc",
"name": "Clod",
"mode": "string",
"value": "igrovik"
},
{
"id": "vmfi99gjw",
"name": "MyOpenAiKey",
"mode": "string",
"value": "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"
},
{
"id": "vmfjkn09i",
"name": "NAMETest",
"mode": "expr",
"value": "128 + 64"
}
],
"_origId": "n5"
},
"in": {}
},
{
"id": "n2",
"type": "Return",
"pos_x": 507,
"pos_y": 459,
"config": {
"target_format": "auto",
"text_template": "[[OUT3]]",
"_origId": "n2"
},
"in": {
"depends": "n1.false"
}
},
{
"id": "n3",
"type": "RawForward",
"pos_x": 114,
"pos_y": 425,
"config": {
"passthrough_headers": true,
"extra_headers": "{\"connection\": \"close\"}",
"_origId": "n3"
},
"in": {
"depends": [
"n5.done",
"n1.true"
]
}
},
{
"id": "n1",
"type": "If",
"pos_x": 344,
"pos_y": 730,
"config": {
"expr": "([[OUT3]] contains \"Stream&#32;failed&#32;to\") || ([[OUT3]] contains \"gemini-2.5-pro\")",
"_origId": "n1"
},
"in": {
"depends": "n3.done"
}
}
]
}
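
retry.json is the minimal retry shape: n3 forwards the raw request, n1 tests OUT3 for failure markers, n1.true loops back into n3, and n1.false hands OUT3 to the Return node. Under iterative loop_mode this behaves like a bounded while-loop; a sketch of the control flow under that reading (function names are placeholders):

    def run_retry(forward, is_bad, max_iters=1000):
        # n3 -> n1 -> (true: run n3 again | false: n2 returns [[OUT3]])
        for _ in range(max_iters):
            out3 = forward()
            if not is_bad(out3):
                return out3
        raise RuntimeError("loop_max_iters exhausted")

    out = run_retry(lambda: "ok",
                    lambda t: "Stream failed to" in t or "gemini-2.5-pro" in t)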

View File

@@ -40,7 +40,7 @@
"pos_x": 652,
"pos_y": 46,
"config": {
"provider": "claude",
"provider": "gemini",
"provider_configs": {
"openai": {
"base_url": "https://api.openai.com",
@@ -59,6 +59,12 @@
"endpoint": "/v1/messages",
"headers": "{\"x-api-key\":\"[[Clod]]\",\"anthropic-version\":\"2023-06-01\",\"anthropic-beta\":\"output-128k-2025-02-19\"}",
"template": "{\n \"model\": \"claude-opus-4-20250514\",\n [[PROMPT]],\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"stop_sequences\": {{ incoming.json.stop_sequences|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }},\n \"thinking\": {\n \"type\": \"{{ incoming.json.thinking.type|default('enabled') }}\",\n \"budget_tokens\": {{ incoming.json.thinking.budget_tokens|default(3000) }}\n }\n}"
},
"gemini_image": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent",
"headers": "{\"x-goog-api-key\":\"[[VAR:incoming.api_keys.key]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]]\n}"
}
},
"blocks": [
@@ -77,14 +83,6 @@
"prompt": "Сделай [[OUT1]] красивее",
"enabled": true,
"order": 1
},
{
"id": "bmfmssvy8",
"name": "New Block",
"role": "assistant",
"prompt": "Sure",
"enabled": true,
"order": 2
}
],
"_origId": "n2"
@@ -118,6 +116,12 @@
"endpoint": "/v1/messages",
"headers": "{\"x-api-key\":\"[[VAR:incoming.headers.x-api-key]]\",\"anthropic-version\":\"2023-06-01\",\"anthropic-beta\":\"[[VAR:incoming.headers.anthropic-beta]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"stop_sequences\": {{ incoming.json.stop_sequences|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }},\n \"thinking\": {\n \"type\": \"{{ incoming.json.thinking.type|default('disabled') }}\",\n \"budget_tokens\": {{ incoming.json.thinking.budget_tokens|default(0) }}\n },\n \"anthropic_version\": \"{{ anthropic_version|default('2023-06-01') }}\"\n}"
},
"gemini_image": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent",
"headers": "{\"x-goog-api-key\":\"[[VAR:incoming.api_keys.key]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]]\n}"
}
},
"blocks": [
@@ -205,6 +209,12 @@
"endpoint": "/v1/messages",
"headers": "{\"x-api-key\":\"[[VAR:incoming.headers.x-api-key]]\",\"anthropic-version\":\"2023-06-01\",\"anthropic-beta\":\"[[VAR:incoming.headers.anthropic-beta]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"stop_sequences\": {{ incoming.json.stop_sequences|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }},\n \"thinking\": {\n \"type\": \"{{ incoming.json.thinking.type|default('disabled') }}\",\n \"budget_tokens\": {{ incoming.json.thinking.budget_tokens|default(0) }}\n },\n \"anthropic_version\": \"{{ anthropic_version|default('2023-06-01') }}\"\n}"
},
"gemini_image": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent",
"headers": "{\"x-goog-api-key\":\"[[VAR:incoming.api_keys.key]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]]\n}"
}
},
"blocks": [

171
presets/testtesttt.json Normal file
View File

@@ -0,0 +1,171 @@
{
"id": "pipeline_editor",
"name": "Edited Pipeline",
"parallel_limit": 8,
"loop_mode": "iterative",
"loop_max_iters": 1000,
"loop_time_budget_ms": 999999999999,
"clear_var_store": true,
"http_timeout_sec": 999,
"text_extract_strategy": "auto",
"text_extract_json_path": "",
"text_join_sep": "\n",
"text_extract_presets": [
{
"id": "pmfqonx6fvcubc09k4ep",
"name": "candidates.0.content.parts.1.inlineData.data",
"strategy": "jsonpath",
"json_path": "candidates.0.content.parts.1.inlineData.data",
"join_sep": "\n"
},
{
"id": "pmfqrelw6wu9rutnzk1",
"name": "candidates.0.content.parts.1.inlineData",
"strategy": "jsonpath",
"json_path": "candidates.0.content.parts.1.inlineData",
"join_sep": "\n"
}
],
"nodes": [
{
"id": "n5",
"type": "SetVars",
"pos_x": -125,
"pos_y": 561,
"config": {
"variables": [
{
"id": "vmfi99ftc",
"name": "Clod",
"mode": "string",
"value": "igrovik"
},
{
"id": "vmfi99gjw",
"name": "MyOpenAiKey",
"mode": "string",
"value": "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"
},
{
"id": "vmfjkn09i",
"name": "NAMETest",
"mode": "expr",
"value": "128 + 64"
}
],
"_origId": "n5"
},
"in": {}
},
{
"id": "n2",
"type": "Return",
"pos_x": 954,
"pos_y": 564,
"config": {
"target_format": "auto",
"text_template": "[[OUT4]]",
"_origId": "n2"
},
"in": {
"depends": "n6.false"
}
},
{
"id": "n3",
"type": "RawForward",
"pos_x": 72,
"pos_y": 444,
"config": {
"passthrough_headers": true,
"extra_headers": "{\"connection\": \"close\"}",
"_origId": "n3"
},
"in": {
"depends": [
"n5.done",
"n1.true"
]
}
},
{
"id": "n1",
"type": "If",
"pos_x": 75,
"pos_y": 909,
"config": {
"expr": "([[OUT3]] contains \"Stream&#32;failed&#32;to\") || ([[OUT3]] contains \"gemini-2.5-pro\")",
"_origId": "n1"
},
"in": {
"depends": "n3.done"
}
},
{
"id": "n4",
"type": "ProviderCall",
"pos_x": 663,
"pos_y": 335,
"config": {
"provider": "gemini",
"provider_configs": {
"openai": {
"base_url": "https://api.openai.com",
"endpoint": "/v1/chat/completions",
"headers": "{\"Authorization\":\"Bearer [[VAR:incoming.headers.authorization]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"max_completion_tokens\": {{ incoming.json.max_completion_tokens|default(params.max_tokens|default(256)) }},\n \"presence_penalty\": {{ incoming.json.presence_penalty|default(0) }},\n \"frequency_penalty\": {{ incoming.json.frequency_penalty|default(0) }},\n \"stop\": {{ incoming.json.stop|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }}\n}"
},
"gemini": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent?key=[[VAR:incoming.api_keys.key]]",
"headers": "{}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"safetySettings\": {{ incoming.json.safetySettings|default([]) }},\n \"generationConfig\": {\n \"temperature\": {{ incoming.json.generationConfig.temperature|default(params.temperature|default(0.7)) }},\n \"topP\": {{ incoming.json.generationConfig.topP|default(params.top_p|default(1)) }},\n \"maxOutputTokens\": {{ incoming.json.generationConfig.maxOutputTokens|default(params.max_tokens|default(256)) }},\n \"stopSequences\": {{ incoming.json.generationConfig.stopSequences|default(params.stop|default([])) }},\n \"candidateCount\": {{ incoming.json.generationConfig.candidateCount|default(1) }},\n \"thinkingConfig\": {\n \"includeThoughts\": {{ incoming.json.generationConfig.thinkingConfig.includeThoughts|default(false) }},\n \"thinkingBudget\": {{ incoming.json.generationConfig.thinkingConfig.thinkingBudget|default(0) }}\n }\n }\n}"
},
"gemini_image": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent",
"headers": "{\"x-goog-api-key\":\"[[VAR:incoming.api_keys.key]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]]\n}"
},
"claude": {
"base_url": "https://api.anthropic.com",
"endpoint": "/v1/messages",
"headers": "{\"x-api-key\":\"[[VAR:incoming.headers.x-api-key]]\",\"anthropic-version\":\"2023-06-01\",\"anthropic-beta\":\"[[VAR:incoming.headers.anthropic-beta]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"stop_sequences\": {{ incoming.json.stop_sequences|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }},\n \"thinking\": {\n \"type\": \"{{ incoming.json.thinking.type|default('disabled') }}\",\n \"budget_tokens\": {{ incoming.json.thinking.budget_tokens|default(0) }}\n },\n \"anthropic_version\": \"{{ anthropic_version|default('2023-06-01') }}\"\n}"
}
},
"blocks": [
{
"id": "bmfwy94ev",
"name": "Твой ответ недостаточно хорош",
"role": "user",
"prompt": "Твой ответ:\n```\n[[OUT3]]\n```\nнедостаточно хорош, при его написании ты не следовал инструкциям. переделай исходя из инструкций, найди недостатк.",
"enabled": true,
"order": 0
}
],
"_origId": "n4",
"prompt_combine": "[[VAR:incoming.json.contents]] & [[PROMPT]]@pos=-1"
},
"in": {
"depends": [
"n6.true",
"n1.false"
]
}
},
{
"id": "n6",
"type": "If",
"pos_x": 675,
"pos_y": 882.25,
"config": {
"expr": "([[OUT4]] contains \"Stream&#32;failed&#32;to\") || ([[OUT4]] contains \"gemini-2.5-pro\")",
"_origId": "n6"
},
"in": {
"depends": "n4.done"
}
}
]
}

285
presets/tttttt.json Normal file
View File

@@ -0,0 +1,285 @@
{
"id": "pipeline_editor",
"name": "Edited Pipeline",
"parallel_limit": 8,
"loop_mode": "iterative",
"loop_max_iters": 1000,
"loop_time_budget_ms": 999999999999,
"clear_var_store": true,
"http_timeout_sec": 999,
"text_extract_strategy": "auto",
"text_extract_json_path": "",
"text_join_sep": "\n",
"text_extract_presets": [
{
"id": "pmfqonx6fvcubc09k4ep",
"name": "candidates.0.content.parts.1.inlineData.data",
"strategy": "jsonpath",
"json_path": "candidates.0.content.parts.1.inlineData.data",
"join_sep": "\n"
},
{
"id": "pmfqrelw6wu9rutnzk1",
"name": "candidates.0.content.parts.1.inlineData",
"strategy": "jsonpath",
"json_path": "candidates.0.content.parts.1.inlineData",
"join_sep": "\n"
}
],
"nodes": [
{
"id": "n5",
"type": "SetVars",
"pos_x": 12,
"pos_y": 780,
"config": {
"variables": [
{
"id": "vmfi99ftc",
"name": "Clod",
"mode": "string",
"value": "igrovik"
},
{
"id": "vmfi99gjw",
"name": "MyOpenAiKey",
"mode": "string",
"value": "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"
},
{
"id": "vmfjkn09i",
"name": "NAMETest",
"mode": "expr",
"value": "128 + 64"
}
],
"_origId": "n5"
},
"in": {}
},
{
"id": "n2",
"type": "Return",
"pos_x": 1344,
"pos_y": 756,
"config": {
"target_format": "auto",
"text_template": "[[OUT7]]",
"_origId": "n2"
},
"in": {
"depends": "n8.false"
}
},
{
"id": "n3",
"type": "RawForward",
"pos_x": 588,
"pos_y": 624,
"config": {
"passthrough_headers": true,
"extra_headers": "{\"connection\": \"close\"}",
"_origId": "n3"
},
"in": {
"depends": [
"n5.done",
"n1.true"
]
}
},
{
"id": "n1",
"type": "If",
"pos_x": 564,
"pos_y": 876,
"config": {
"expr": "([[OUT3]] contains \"Stream&#32;failed&#32;to\") || ([[OUT3]] contains \"gemini-2.5-pro\")",
"_origId": "n1"
},
"in": {
"depends": "n3.done"
}
},
{
"id": "n4",
"type": "ProviderCall",
"pos_x": 792,
"pos_y": 624,
"config": {
"provider": "openai",
"provider_configs": {
"openai": {
"base_url": "https://api.openai.com",
"endpoint": "/v1/chat/completions",
"headers": "{\"Authorization\":\"[[VAR:incoming.headers.authorization]]\"}",
"template": "{\n \"model\": \"gpt-5-chat-latest\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_completion_tokens\": {{ incoming.json.max_completion_tokens|default(params.max_tokens|default(256)) }},\n \"presence_penalty\": {{ incoming.json.presence_penalty|default(0) }},\n \"frequency_penalty\": {{ incoming.json.frequency_penalty|default(0) }},\n \"stop\": {{ incoming.json.stop|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }}\n}"
},
"gemini": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent?key=[[VAR:incoming.api_keys.key]]",
"headers": "{}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"safetySettings\": {{ incoming.json.safetySettings|default([]) }},\n \"generationConfig\": {\n \"temperature\": {{ incoming.json.generationConfig.temperature|default(params.temperature|default(0.7)) }},\n \"topP\": {{ incoming.json.generationConfig.topP|default(params.top_p|default(1)) }},\n \"maxOutputTokens\": {{ incoming.json.generationConfig.maxOutputTokens|default(params.max_tokens|default(256)) }},\n \"stopSequences\": {{ incoming.json.generationConfig.stopSequences|default(params.stop|default([])) }},\n \"candidateCount\": {{ incoming.json.generationConfig.candidateCount|default(1) }},\n \"thinkingConfig\": {\n \"includeThoughts\": {{ incoming.json.generationConfig.thinkingConfig.includeThoughts|default(false) }},\n \"thinkingBudget\": {{ incoming.json.generationConfig.thinkingConfig.thinkingBudget|default(0) }}\n }\n }\n}"
},
"gemini_image": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent",
"headers": "{\"x-goog-api-key\":\"[[VAR:incoming.api_keys.key]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]]\n}"
},
"claude": {
"base_url": "https://api.anthropic.com",
"endpoint": "/v1/messages",
"headers": "{\"x-api-key\":\"[[VAR:incoming.headers.x-api-key]]\",\"anthropic-version\":\"2023-06-01\",\"anthropic-beta\":\"[[VAR:incoming.headers.anthropic-beta]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"stop_sequences\": {{ incoming.json.stop_sequences|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }},\n \"thinking\": {\n \"type\": \"{{ incoming.json.thinking.type|default('disabled') }}\",\n \"budget_tokens\": {{ incoming.json.thinking.budget_tokens|default(0) }}\n },\n \"anthropic_version\": \"{{ anthropic_version|default('2023-06-01') }}\"\n}"
}
},
"blocks": [
{
"id": "bmfwy94ev",
"name": "Твой ответ недостаточно хорош",
"role": "user",
"prompt": "Твой ответ:\n```\n[[OUT3]]\n```\nнедостаточно хорош, при его написании ты не следовал инструкциям. переделай исходя из инструкций, найди недостатк1.",
"enabled": true,
"order": 0
}
],
"_origId": "n4",
"prompt_combine": "[[VAR:incoming.json.messages]] & [[PROMPT]]"
},
"in": {
"depends": [
"n6.true",
"n1.false"
]
}
},
{
"id": "n6",
"type": "If",
"pos_x": 792,
"pos_y": 876,
"config": {
"expr": "([[OUT4]] contains \"Stream&#32;failed&#32;to\") || ([[OUT4]] contains \"gemini-2.5-pro\")",
"_origId": "n6"
},
"in": {
"depends": "n4.done"
}
},
{
"id": "n7",
"type": "ProviderCall",
"pos_x": 1056,
"pos_y": 624,
"config": {
"provider": "claude",
"provider_configs": {
"openai": {
"base_url": "https://api.openai.com",
"endpoint": "/v1/chat/completions",
"headers": "{\"Authorization\":\"Bearer [[VAR:incoming.headers.authorization]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"max_completion_tokens\": {{ incoming.json.max_completion_tokens|default(params.max_tokens|default(256)) }},\n \"presence_penalty\": {{ incoming.json.presence_penalty|default(0) }},\n \"frequency_penalty\": {{ incoming.json.frequency_penalty|default(0) }},\n \"stop\": {{ incoming.json.stop|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }}\n}"
},
"gemini": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent?key=[[VAR:incoming.api_keys.key]]",
"headers": "{}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"safetySettings\": {{ incoming.json.safetySettings|default([]) }},\n \"generationConfig\": {\n \"temperature\": {{ incoming.json.generationConfig.temperature|default(params.temperature|default(0.7)) }},\n \"topP\": {{ incoming.json.generationConfig.topP|default(params.top_p|default(1)) }},\n \"maxOutputTokens\": {{ incoming.json.generationConfig.maxOutputTokens|default(params.max_tokens|default(256)) }},\n \"stopSequences\": {{ incoming.json.generationConfig.stopSequences|default(params.stop|default([])) }},\n \"candidateCount\": {{ incoming.json.generationConfig.candidateCount|default(1) }},\n \"thinkingConfig\": {\n \"includeThoughts\": {{ incoming.json.generationConfig.thinkingConfig.includeThoughts|default(false) }},\n \"thinkingBudget\": {{ incoming.json.generationConfig.thinkingConfig.thinkingBudget|default(0) }}\n }\n }\n}"
},
"gemini_image": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent",
"headers": "{\"x-goog-api-key\":\"[[VAR:incoming.api_keys.key]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]]\n}"
},
"claude": {
"base_url": "https://proxy.malepreg.lol/proxy/aws/claude",
"endpoint": "/v1/messages",
"headers": "{\"x-api-key\":\"igrovik\",\"anthropic-version\":\"2023-06-01\",\"anthropic-beta\":\"[[VAR:incoming.headers.anthropic-beta]]\"}",
"template": "{\n \"model\": \"claude-opus-4-20250514\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"stop_sequences\": {{ incoming.json.stop_sequences|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }},\n \"thinking\": {\n \"type\": \"{{ incoming.json.thinking.type|default('disabled') }}\",\n \"budget_tokens\": {{ incoming.json.thinking.budget_tokens|default(0) }}\n },\n \"anthropic_version\": \"{{ anthropic_version|default('2023-06-01') }}\"\n}"
}
},
"blocks": [
{
"id": "bmfzvzpl7",
"name": "Может содержать такие конструкции",
"role": "user",
"prompt": "Твой ответ:\n```\n[[OUT4]]\n```\nМожет содержать такие конструкции:\n**'Not X, but Y'** narrative structure. This includes any and all variations of stating what something *is not* in order to emphasize what it *is*. Нужно заменить места на нормальный нарратив.",
"enabled": true,
"order": 0
}
],
"_origId": "n7",
"prompt_combine": "[[VAR:incoming.json.messages]] & [[PROMPT]]",
"claude_no_system": true
},
"in": {
"depends": [
"n6.false",
"n8.true"
]
}
},
{
"id": "n8",
"type": "If",
"pos_x": 1068,
"pos_y": 876,
"config": {
"expr": "([[OUT7]] contains \"Stream failed to\") || ([[OUT7]] contains \"gemini-2.5-pro\")",
"_origId": "n8"
},
"in": {
"depends": "n7.done"
}
},
{
"id": "n9",
"type": "ProviderCall",
"pos_x": 1104,
"pos_y": 456,
"config": {
"provider": "claude",
"provider_configs": {
"openai": {
"base_url": "https://api.openai.com",
"endpoint": "/v1/chat/completions",
"headers": "{\"Authorization\":\"Bearer [[VAR:incoming.headers.authorization]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"max_completion_tokens\": {{ incoming.json.max_completion_tokens|default(params.max_tokens|default(256)) }},\n \"presence_penalty\": {{ incoming.json.presence_penalty|default(0) }},\n \"frequency_penalty\": {{ incoming.json.frequency_penalty|default(0) }},\n \"stop\": {{ incoming.json.stop|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }}\n}"
},
"gemini": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent?key=[[VAR:incoming.api_keys.key]]",
"headers": "{}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"safetySettings\": {{ incoming.json.safetySettings|default([]) }},\n \"generationConfig\": {\n \"temperature\": {{ incoming.json.generationConfig.temperature|default(params.temperature|default(0.7)) }},\n \"topP\": {{ incoming.json.generationConfig.topP|default(params.top_p|default(1)) }},\n \"maxOutputTokens\": {{ incoming.json.generationConfig.maxOutputTokens|default(params.max_tokens|default(256)) }},\n \"stopSequences\": {{ incoming.json.generationConfig.stopSequences|default(params.stop|default([])) }},\n \"candidateCount\": {{ incoming.json.generationConfig.candidateCount|default(1) }},\n \"thinkingConfig\": {\n \"includeThoughts\": {{ incoming.json.generationConfig.thinkingConfig.includeThoughts|default(false) }},\n \"thinkingBudget\": {{ incoming.json.generationConfig.thinkingConfig.thinkingBudget|default(0) }}\n }\n }\n}"
},
"gemini_image": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent",
"headers": "{\"x-goog-api-key\":\"[[VAR:incoming.api_keys.key]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]]\n}"
},
"claude": {
"base_url": "https://proxy.malepreg.lol/proxy/aws/claude",
"endpoint": "/v1/messages",
"headers": "{\"x-api-key\":\"igrovik\",\"anthropic-version\":\"2023-06-01\",\"anthropic-beta\":\"[[VAR:incoming.headers.anthropic-beta]]\"}",
"template": "{\n \"model\": \"claude-opus-4-20250514\",\n [[PROMPT]],\n \"top_p\": 1,\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"stop_sequences\": {{ incoming.json.stop_sequences|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }},\n \"thinking\": {\n \"type\": \"{{ incoming.json.thinking.type|default('disabled') }}\",\n \"budget_tokens\": {{ incoming.json.thinking.budget_tokens|default(0) }}\n },\n \"anthropic_version\": \"{{ anthropic_version|default('2023-06-01') }}\"\n}"
}
},
"blocks": [
{
"id": "bmg26nusx",
"name": "New Block",
"role": "user",
"prompt": "Hey",
"enabled": true,
"order": 0
}
],
"_origId": "n9"
},
"in": {}
}
]
}
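
tttttt.json adds claude_no_system on n7. The Anthropic Messages API does not accept a "system" role inside messages (system prompts travel in the top-level system field), so a flag like this presumably strips system entries before the combined history is sent. A sketch under that assumption:

    def to_claude_messages(msgs: list[dict], no_system: bool = True):
        # /v1/messages accepts only user/assistant roles in "messages";
        # a system prompt, if kept at all, must go in the top-level "system" field
        system = "\n".join(m["content"] for m in msgs if m["role"] == "system")
        rest = [m for m in msgs if m["role"] != "system"]
        return (None if no_system else system), rest

    system, messages = to_claude_messages(
        [{"role": "system", "content": "be terse"}, {"role": "user", "content": "Hey"}])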

247
presets/tttttt1.json Normal file
View File

@@ -0,0 +1,247 @@
{
"id": "pipeline_editor",
"name": "Edited Pipeline",
"parallel_limit": 8,
"loop_mode": "iterative",
"loop_max_iters": 1000,
"loop_time_budget_ms": 999999999999,
"clear_var_store": true,
"http_timeout_sec": 999,
"text_extract_strategy": "auto",
"text_extract_json_path": "",
"text_join_sep": "\n",
"text_extract_presets": [
{
"id": "pmfqonx6fvcubc09k4ep",
"name": "candidates.0.content.parts.1.inlineData.data",
"strategy": "jsonpath",
"json_path": "candidates.0.content.parts.1.inlineData.data",
"join_sep": "\n"
},
{
"id": "pmfqrelw6wu9rutnzk1",
"name": "candidates.0.content.parts.1.inlineData",
"strategy": "jsonpath",
"json_path": "candidates.0.content.parts.1.inlineData",
"join_sep": "\n"
}
],
"nodes": [
{
"id": "n5",
"type": "SetVars",
"pos_x": 12,
"pos_y": 780,
"config": {
"variables": [
{
"id": "vmfi99ftc",
"name": "Clod",
"mode": "string",
"value": "igrovik"
},
{
"id": "vmfi99gjw",
"name": "MyOpenAiKey",
"mode": "string",
"value": "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"
},
{
"id": "vmfjkn09i",
"name": "NAMETest",
"mode": "expr",
"value": "128 + 64"
}
],
"_origId": "n5"
},
"in": {}
},
{
"id": "n2",
"type": "Return",
"pos_x": 1344,
"pos_y": 756,
"config": {
"target_format": "auto",
"text_template": "[[OUT7]]",
"_origId": "n2"
},
"in": {
"depends": "n8.false"
}
},
{
"id": "n3",
"type": "RawForward",
"pos_x": 588,
"pos_y": 624,
"config": {
"passthrough_headers": true,
"extra_headers": "{\"connection\": \"close\"}",
"_origId": "n3",
"while_expr": "([[OUT3]] contains \"Stream failed to\") || ([[OUT3]] contains \"gemini-2.5-pro\")",
"ignore_errors": false,
"while_max_iters": 50
},
"in": {
"depends": [
"n5.done",
"n1.true"
]
}
},
{
"id": "n1",
"type": "If",
"pos_x": 600,
"pos_y": 876,
"config": {
"expr": "([[OUT3]] contains \"Stream&#32;failed&#32;to\") || ([[OUT3]] contains \"gemini-2.5-pro\")",
"_origId": "n1"
},
"in": {
"depends": "n3.done"
}
},
{
"id": "n4",
"type": "ProviderCall",
"pos_x": 792,
"pos_y": 624,
"config": {
"provider": "gemini",
"provider_configs": {
"openai": {
"base_url": "https://api.openai.com",
"endpoint": "/v1/chat/completions",
"headers": "{\"Authorization\":\"[[VAR:incoming.headers.authorization]]\"}",
"template": "{\n \"model\": \"gpt-5-chat-latest\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_completion_tokens\": {{ incoming.json.max_completion_tokens|default(params.max_tokens|default(256)) }},\n \"presence_penalty\": {{ incoming.json.presence_penalty|default(0) }},\n \"frequency_penalty\": {{ incoming.json.frequency_penalty|default(0) }},\n \"stop\": {{ incoming.json.stop|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }}\n}"
},
"gemini": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent?key=[[VAR:incoming.api_keys.key]]",
"headers": "{}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"safetySettings\": {{ incoming.json.safetySettings|default([]) }},\n \"generationConfig\": {\n \"temperature\": {{ incoming.json.generationConfig.temperature|default(params.temperature|default(0.7)) }},\n \"topP\": {{ incoming.json.generationConfig.topP|default(params.top_p|default(1)) }},\n \"maxOutputTokens\": {{ incoming.json.generationConfig.maxOutputTokens|default(params.max_tokens|default(256)) }},\n \"stopSequences\": {{ incoming.json.generationConfig.stopSequences|default(params.stop|default([])) }},\n \"candidateCount\": {{ incoming.json.generationConfig.candidateCount|default(1) }},\n \"thinkingConfig\": {\n \"includeThoughts\": {{ incoming.json.generationConfig.thinkingConfig.includeThoughts|default(false) }},\n \"thinkingBudget\": {{ incoming.json.generationConfig.thinkingConfig.thinkingBudget|default(0) }}\n }\n }\n}"
},
"gemini_image": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent",
"headers": "{\"x-goog-api-key\":\"[[VAR:incoming.api_keys.key]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]]\n}"
},
"claude": {
"base_url": "https://api.anthropic.com",
"endpoint": "/v1/messages",
"headers": "{\"x-api-key\":\"[[VAR:incoming.headers.x-api-key]]\",\"anthropic-version\":\"2023-06-01\",\"anthropic-beta\":\"[[VAR:incoming.headers.anthropic-beta]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"stop_sequences\": {{ incoming.json.stop_sequences|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }},\n \"thinking\": {\n \"type\": \"{{ incoming.json.thinking.type|default('disabled') }}\",\n \"budget_tokens\": {{ incoming.json.thinking.budget_tokens|default(0) }}\n },\n \"anthropic_version\": \"{{ anthropic_version|default('2023-06-01') }}\"\n}"
}
},
"blocks": [
{
"id": "bmfwy94ev",
"name": "Твой ответ недостаточно хорош",
"role": "user",
"prompt": "Твой ответ:\n```\n[[OUT3]]\n```\nнедостаточно хорош, при его написании ты не следовал инструкциям. переделай исходя из инструкций, найди недостатки разобрав каждое действие оценив его логичность и следование истории от 0до10, перепиши эти моменты на нормальные.",
"enabled": true,
"order": 0
}
],
"_origId": "n4",
"prompt_combine": "[[VAR:incoming.json.contents]] & [[PROMPT]]@pos=-1",
"while_expr": "([[OUT3]] contains \"Stream failed to\") || ([[OUT3]] contains \"gemini-2.5-pro\")",
"ignore_errors": false,
"while_max_iters": 50
},
"in": {
"depends": [
"n6.true",
"n1.false"
]
}
},
{
"id": "n6",
"type": "If",
"pos_x": 852,
"pos_y": 960,
"config": {
"expr": "([[OUT4]] contains \"Stream&#32;failed&#32;to\") || ([[OUT4]] contains \"gemini-2.5-pro\")",
"_origId": "n6"
},
"in": {
"depends": "n4.done"
}
},
{
"id": "n7",
"type": "ProviderCall",
"pos_x": 1080,
"pos_y": 624,
"config": {
"provider": "gemini",
"provider_configs": {
"openai": {
"base_url": "https://api.openai.com",
"endpoint": "/v1/chat/completions",
"headers": "{\"Authorization\":\"Bearer [[MyOpenAiKey]]\"}",
"template": "{\n \"model\": \"gpt-5-chat-latest\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_completion_tokens\": {{ incoming.json.max_completion_tokens|default(params.max_tokens|default(256)) }},\n \"presence_penalty\": {{ incoming.json.presence_penalty|default(0) }},\n \"frequency_penalty\": {{ incoming.json.frequency_penalty|default(0) }},\n \"stop\": {{ incoming.json.stop|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }}\n}"
},
"gemini": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent?key=[[VAR:incoming.api_keys.key]]",
"headers": "{}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]],\n \"safetySettings\": {{ incoming.json.safetySettings|default([]) }},\n \"generationConfig\": {\n \"temperature\": {{ incoming.json.generationConfig.temperature|default(params.temperature|default(0.7)) }},\n \"topP\": {{ incoming.json.generationConfig.topP|default(params.top_p|default(1)) }},\n \"maxOutputTokens\": {{ incoming.json.generationConfig.maxOutputTokens|default(params.max_tokens|default(256)) }},\n \"stopSequences\": {{ incoming.json.generationConfig.stopSequences|default(params.stop|default([])) }},\n \"candidateCount\": {{ incoming.json.generationConfig.candidateCount|default(1) }},\n \"thinkingConfig\": {\n \"includeThoughts\": {{ incoming.json.generationConfig.thinkingConfig.includeThoughts|default(false) }},\n \"thinkingBudget\": {{ incoming.json.generationConfig.thinkingConfig.thinkingBudget|default(0) }}\n }\n }\n}"
},
"gemini_image": {
"base_url": "https://generativelanguage.googleapis.com",
"endpoint": "/v1beta/models/{{ model }}:generateContent",
"headers": "{\"x-goog-api-key\":\"[[VAR:incoming.api_keys.key]]\"}",
"template": "{\n \"model\": \"{{ model }}\",\n [[PROMPT]]\n}"
},
"claude": {
"base_url": "https://proxy.malepreg.lol/proxy/aws/claude",
"endpoint": "/v1/messages",
"headers": "{\"x-api-key\":\"igrovik\",\"anthropic-version\":\"2023-06-01\",\"anthropic-beta\":\"[[VAR:incoming.headers.anthropic-beta]]\"}",
"template": "{\n \"model\": \"claude-opus-4-20250514\",\n [[PROMPT]],\n \"temperature\": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},\n \"top_p\": {{ incoming.json.top_p|default(params.top_p|default(1)) }},\n \"max_tokens\": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},\n \"stop_sequences\": {{ incoming.json.stop_sequences|default(params.stop|default([])) }},\n \"stream\": {{ incoming.json.stream|default(false) }},\n \"thinking\": {\n \"type\": \"{{ incoming.json.thinking.type|default('disabled') }}\",\n \"budget_tokens\": {{ incoming.json.thinking.budget_tokens|default(0) }}\n },\n \"anthropic_version\": \"{{ anthropic_version|default('2023-06-01') }}\"\n}"
}
},
"blocks": [
{
"id": "bmfzvzpl7",
"name": "Может содержать такие конструкции",
"role": "user",
"prompt": "Твой ответ:\n```\n[[OUT4]]\n```\nМожет содержать такие конструкции:\n**'Not X, but Y'** narrative structure. This includes any and all variations of stating what something *is not* in order to emphasize what it *is*. Нужно заменить места на нормальный нарратив.",
"enabled": true,
"order": 0
}
],
"_origId": "n7",
"prompt_combine": "[[VAR:incoming.json.contents]] & [[PROMPT]]@pos=-1",
"claude_no_system": true,
"while_expr": "([[OUT3]] contains \"Stream failed to\") || ([[OUT3]] contains \"gemini-2.5-pro\")",
"ignore_errors": false,
"while_max_iters": 50
},
"in": {
"depends": [
"n6.false",
"n8.true"
]
}
},
{
"id": "n8",
"type": "If",
"pos_x": 1068,
"pos_y": 876,
"config": {
"expr": "([[OUT7]] contains \"Stream failed to\") || ([[OUT7]] contains \"gemini-2.5-pro\")",
"_origId": "n8"
},
"in": {
"depends": "n7.done"
}
}
]
}
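
tttttt1.json moves the retry logic from explicit If-nodes into node config: while_expr re-runs a node while the condition over its own output stays true, capped by while_max_iters=50, with ignore_errors controlling whether a failed call aborts the loop. A sketch of those semantics as the field names suggest them (not verified against the engine):

    def run_with_while(call, should_retry, max_iters=50):
        # Execute once, then repeat while the while_expr still matches
        out = call()
        iters = 1
        while should_retry(out) and iters < max_iters:
            out = call()
            iters += 1
        return out

    out3 = run_with_while(lambda: "done", lambda t: "Stream failed to" in t)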

View File

@@ -1,8 +1,8 @@
fastapi==0.112.2
fastapi==0.115.2
uvicorn==0.30.6
pydantic==2.8.2
httpx==0.27.0
starlette==0.38.2
httpx[socks]==0.27.0
starlette==0.40.0
brotlicffi
brotli
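
The swap from httpx to httpx[socks] pulls in the socksio extra, so outbound provider calls can be routed through a SOCKS proxy. For example:

    import httpx

    # Route provider traffic through a local SOCKS5 proxy
    # (the proxy= parameter is available in httpx 0.26+)
    client = httpx.Client(proxy="socks5://127.0.0.1:1080")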

View File

@@ -1,27 +1,51 @@
@echo off
setlocal
chcp 65001 >NUL
set PORT=7860
echo Installing dependencies...
python -m pip install --upgrade pip
REM -------- Config --------
if "%PORT%"=="" set PORT=7860
if "%HOST%"=="" set HOST=127.0.0.1
REM ------------------------
echo [НадTavern] Preparing virtual environment...
REM Pick Python launcher
where py >NUL 2>&1
if %ERRORLEVEL%==0 (
set PY=py
) else (
set PY=python
)
REM Create venv if missing
if not exist ".venv\Scripts\python.exe" (
%PY% -m venv .venv
if errorlevel 1 goto :fail
)
set "VENV_PY=.venv\Scripts\python.exe"
echo [НадTavern] Upgrading pip...
"%VENV_PY%" -m pip install --upgrade pip
if errorlevel 1 goto :fail
pip install -r requirements.txt
echo [НадTavern] Installing dependencies from requirements.txt...
"%VENV_PY%" -m pip install -r requirements.txt
if errorlevel 1 goto :fail
echo Starting НадTavern on http://127.0.0.1:%PORT%/
echo [НадTavern] Starting on http://%HOST%:%PORT%/
timeout /t 1 /nobreak >NUL
start "" "http://127.0.0.1:%PORT%/ui/editor.html"
python -m uvicorn agentui.api.server:app --host 127.0.0.1 --port %PORT% --log-level info
start "" "http://%HOST%:%PORT%/ui/editor.html"
"%VENV_PY%" -m uvicorn agentui.api.server:app --host %HOST% --port %PORT% --log-level info
if errorlevel 1 goto :fail
goto :end
:fail
echo.
echo Server failed with errorlevel %errorlevel%.
echo [НадTavern] Server failed with errorlevel %errorlevel%.
echo Check the console output above and the file agentui.log for details.
pause
:end
pause
endlocal

View File

@@ -1,10 +1,11 @@
#!/usr/bin/env bash
set -euo pipefail
# НадTavern Linux launcher
# НадTavern Linux/macOS launcher with local .venv bootstrap
# Usage:
# chmod +x ./run_agentui.sh
# ./run_agentui.sh
# Optional env: HOST=0.0.0.0 PORT=7860
# Go to repo root (script location)
cd "$(dirname "$0")"
@@ -12,18 +13,28 @@ cd "$(dirname "$0")"
PORT="${PORT:-7860}"
HOST="${HOST:-127.0.0.1}"
echo "Installing dependencies..."
# Pick python
if command -v python3 >/dev/null 2>&1; then
PY=python3
else
PY=python
fi
"$PY" -m pip install --upgrade pip
"$PY" -m pip install -r requirements.txt
# Create venv if missing
if [ ! -f ".venv/bin/python" ]; then
echo "[НадTavern] Creating .venv ..."
"$PY" -m venv .venv
fi
echo "Starting НадTavern on http://$HOST:$PORT/"
VENV_PY=".venv/bin/python"
echo "[НадTavern] Upgrading pip ..."
"$VENV_PY" -m pip install --upgrade pip
echo "[НадTavern] Installing deps from requirements.txt ..."
"$VENV_PY" -m pip install -r requirements.txt
echo "[НадTavern] Starting on http://$HOST:$PORT/"
# Try to open UI editor in default browser (non-fatal if fails)
if command -v xdg-open >/dev/null 2>&1; then
@@ -32,4 +43,4 @@ elif command -v open >/dev/null 2>&1; then
open "http://$HOST:$PORT/ui/editor.html" >/dev/null 2>&1 || true
fi
exec "$PY" -m uvicorn agentui.api.server:app --host "$HOST" --port "$PORT" --log-level info
exec "$VENV_PY" -m uvicorn agentui.api.server:app --host "$HOST" --port "$PORT" --log-level info

View File

@@ -15,31 +15,38 @@ html, body, button, input, select, textarea, code, pre, a, .chip-btn, .group-tit
--node: #0e1116;
--node-border: #334155;
--node-selected: #1f2937;
/* Default base wire color */
--connector: #7aa2f7;
--connector-muted: #3b82f6;
/* Muted colors for the different node types/branches */
--wire-true: #34d399; /* soft green for If:true */
--wire-false: #94a3b8; /* slate gray for If:false */
--wire-provider: #5b86e5; /* muted blue */
--wire-raw: #8b7de6; /* soft violet */
--wire-setvars: #4fbfa0; /* muted mint */
--wire-return: #93a9d1; /* cool gray-blue */
/* DRY tokens: unified shadows and transitions */
--ring3-22-shadow: 0 0 0 3px rgba(96,165,250,.22), 0 4px 10px rgba(0,0,0,.35);
--ring3-20-shadow: 0 0 0 3px rgba(96,165,250,.20), 0 4px 10px rgba(0,0,0,.35);
--ring2-20-shadow: 0 0 0 2px rgba(96,165,250,.20), 0 2px 6px rgba(0,0,0,.35);
--focus-ring3-20: 0 0 0 3px rgba(96,165,250,.20);
--focus-ring3-22: 0 0 0 3px rgba(96,165,250,.22);
--tr-base: border-color .12s ease, box-shadow .12s ease, background-color .12s ease, color .12s ease;
--tr-pop: transform .12s ease;
--tr-pop-fast: transform .08s ease;
}
html, body {
height: 100%;
overflow: hidden; /* remove page-wide scrollbars so they don't cover the right arrow */
}
/* Global containers and scrolling */
html, body {
height: 100%;
overflow: hidden; /* remove page-wide scrollbars */
}
#container {
position: relative; /* anchor for the absolutely positioned toggle arrows */
}
/* Grid areas to hard-pin layout regardless of hidden panels or absolute children */
#container {
display: grid;
@@ -77,7 +84,36 @@ html, body {
border: 1px solid var(--node-border);
color: #e5e7eb;
border-radius: 12px 12px 0 0;
padding: 6px 10px;
padding: 4px 8px; /* more compact header */
font-size: 12px; /* smaller header font */
line-height: 1.2;
}
/* Node-type icon in the title (monochrome, calm) */
.drawflow .drawflow-node .title-box .node-ico {
display: inline-block;
width: 14px;
height: 14px;
margin-right: 6px;
vertical-align: -2px;
background-size: 14px 14px;
background-repeat: no-repeat;
filter: opacity(.9);
}
/* Per-type SVG icons (colors match the project style) */
.drawflow .drawflow-node .title-box .node-ico-If {
background-image: url("data:image/svg+xml;utf8,<svg xmlns='http://www.w3.org/2000/svg' width='14' height='14' viewBox='0 0 24 24' fill='none' stroke='%2394a3b8' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'><path d='M6 4v6a4 4 0 0 0 4 4h4'/><polyline points='14 14 18 10 14 6'/></svg>");
}
.drawflow .drawflow-node .title-box .node-ico-ProviderCall {
background-image: url("data:image/svg+xml;utf8,<svg xmlns='http://www.w3.org/2000/svg' width='14' height='14' viewBox='0 0 24 24' fill='none' stroke='%235b86e5' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'><path d='M3 15a4 4 0 0 0 4 4h10a4 4 0 0 0 4-4'/><path d='M7 19V5a4 4 0 0 1 4-4h2a4 4 0 0 1 4 4v14'/></svg>");
}
.drawflow .drawflow-node .title-box .node-ico-RawForward {
background-image: url("data:image/svg+xml;utf8,<svg xmlns='http://www.w3.org/2000/svg' width='14' height='14' viewBox='0 0 24 24' fill='none' stroke='%238b7de6' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'><polyline points='5 12 19 12'/><polyline points='12 5 19 12 12 19'/></svg>");
}
.drawflow .drawflow-node .title-box .node-ico-SetVars {
background-image: url("data:image/svg+xml;utf8,<svg xmlns='http://www.w3.org/2000/svg' width='14' height='14' viewBox='0 0 24 24' fill='none' stroke='%234fbfa0' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'><circle cx='12' cy='12' r='3'/><path d='M19.4 15a1.65 1.65 0 0 0 .33 1.82l.06.06a2 2 0 1 1-2.83 2.83l-.06-.06A1.65 1.65 0 0 0 15 19.4a1.65 1.65 0 0 0-1 .6l-.09.1a2 2 0 1 1-3.2 0l-.09-.1a1.65 1.65 0 0 0-1-.6 1.65 1.65 0 0 0-1.82.33l-.06.06a2 2 0 1 1-2.83-2.83l.06-.06A1.65 1.65 0 0 0 4.6 15a1.65 1.65 0 0 0-.6-1l-.1-.09a2 2 0 1 1 0-3.2l.1-.09a1.65 1.65 0 0 0 .6-1 1.65 1.65 0 0 0-.33-1.82l-.06-.06A2 2 0 1 1 6.94 2.6l.06.06A1.65 1.65 0 0 0 8 3.6a1.65 1.65 0 0 0 1-.6l.09-.1a2 2 0 1 1 3.2 0l.09.1a1.65 1.65 0 0 0 1 .6 1.65 1.65 0 0 0 1.82-.33l.06-.06a2 2 0 1 1 2.83 2.83l-.06.06a1.65 1.65 0 0 0-.33 1.82 1.65 1.65 0 0 0 .6 1l.1.09a2 2 0 1 1 0 3.2l-.1.09a1.65 1.65 0 0 0-.6 1z'/></svg>");
}
.drawflow .drawflow-node .title-box .node-ico-Return {
background-image: url("data:image/svg+xml;utf8,<svg xmlns='http://www.w3.org/2000/svg' width='14' height='14' viewBox='0 0 24 24' fill='none' stroke='%2393a9d1' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'><path d='M9 10l-5 5 5 5'/><path d='M20 4v7a4 4 0 0 1-4 4H4'/></svg>");
}
.drawflow .drawflow-node .box {
@@ -87,11 +123,28 @@ html, body {
color: #e5e7eb;
border-radius: 0 0 12px 12px;
overflow: hidden; /* keep content from spilling past the edges */
font-size: 11px; /* more compact content */
line-height: 1.25;
}
/* Preview content inside .box: can be hidden at low LOD without breaking node geometry */
/* Preview content inside .box: can be hidden at low LOD without changing the node box */
.drawflow .drawflow-node .node-preview {
pointer-events: none;
pointer-events: auto; /* allow clicks on summary (<details>) */
opacity: .85;
font-size: 10.5px; /* small base text for previews */
}
/* On the canvas itself, preview fields cannot be edited or clicked */
.drawflow .drawflow-node .node-preview input,
.drawflow .drawflow-node .node-preview textarea {
pointer-events: none;
}
.drawflow .drawflow-node .node-preview label {
font-size: 10px;
margin: 4px 0 2px;
}
/* Address fields stay readable at a "regular" type size */
.drawflow .drawflow-node .node-preview .np-url,
.drawflow .drawflow-node .node-preview .np-endpoint {
font-size: 12px !important;
}
.drawflow .drawflow-node .box textarea,
@@ -104,6 +157,9 @@ html, body {
width: 100%;
max-width: 100%;
box-sizing: border-box;
padding: 6px 8px; /* more compact preview fields */
font-size: 10.5px; /* small text by default */
resize: none; /* disable resizing on the canvas */
}
.df-node .box textarea {
@@ -112,6 +168,7 @@ html, body {
overflow-y: auto; /* vertical scroll only, when needed */
overflow-x: hidden; /* no horizontal scroll inside the textarea */
max-height: 180px; /* prevent unbounded height */
resize: none; /* disable manual resizing of previews */
}
/* Selected-node highlight: kept soft */
@@ -120,6 +177,15 @@ html, body {
border-color: var(--accent);
box-shadow: 0 0 0 1px color-mix(in srgb, var(--accent) 40%, transparent);
}
/* Make disabled fields look like regular previews (no browser "graying out") */
.drawflow .drawflow-node .box input[disabled],
.drawflow .drawflow-node .box textarea[disabled] {
opacity: 1;
color: #e5e7eb;
background: #0f141a;
border-color: #2b3646;
cursor: default;
}
/* Ports: tidier, without the orange */
.drawflow .drawflow-node .inputs .input,
@@ -131,19 +197,36 @@ html, body {
box-shadow: 0 0 0 2px rgba(0,0,0,.25);
}
/* Connection lines: smooth, tidy colors */
/* Connection lines: thinner and calmer */
.drawflow .connection .main-path {
stroke: var(--connector) !important;
/* Line thickness scales with zoom (var(--zoom) is set on #canvas from JS) */
stroke-width: clamp(1.6px, calc(3px / var(--zoom, 1)), 6px) !important;
opacity: 0.95 !important;
stroke-width: clamp(1px, calc(2.2px / var(--zoom, 1)), 4.5px) !important;
opacity: 0.9 !important;
stroke-linecap: round; /* rounded line caps */
stroke-linejoin: round; /* rounded joins */
}
/* Connection styling classes (set by JS; stable even if Drawflow re-renders paths) */
.drawflow .connection.conn-if-true .main-path {
stroke: var(--wire-true) !important;
stroke-dasharray: 6 6 !important;
}
.drawflow .connection.conn-if-false .main-path {
stroke: var(--wire-false) !important;
stroke-dasharray: 6 6 !important;
}
.drawflow .connection.conn-provider .main-path { stroke: var(--wire-provider) !important; }
.drawflow .connection.conn-raw .main-path { stroke: var(--wire-raw) !important; }
.drawflow .connection.conn-setvars .main-path { stroke: var(--wire-setvars) !important; }
.drawflow .connection.conn-return .main-path { stroke: var(--wire-return) !important; }
/* Highlight edges incoming to a failed node (soft red) */
.drawflow .connection.conn-upstream-err .main-path { stroke: #ef4444 !important; opacity: .95 !important; }
.drawflow .connection .main-path.selected,
.drawflow .connection:hover .main-path {
stroke: var(--accent-2) !important;
/* On hover/selected: a bit thicker than the base formula */
stroke-width: clamp(2px, calc(3.6px / var(--zoom, 1)), 7px) !important;
/* On hover/selected: slightly thicker than the base formula */
stroke-width: clamp(1.3px, calc(2.6px / var(--zoom, 1)), 5px) !important;
}
/* Bend points / drag handles */
@@ -268,19 +351,19 @@ a.chip-btn {
color: #e5e7eb;
border: 1px solid #334155;
box-shadow: 0 2px 6px rgba(0,0,0,.35);
transition: transform .12s ease, box-shadow .12s ease, background-color .12s ease, border-color .12s ease, color .12s ease;
transition: var(--tr-base), var(--tr-pop);
user-select: none;
}
.chip-btn:hover,
a.chip-btn:hover {
background: #1f2937;
border-color: var(--accent-2);
box-shadow: 0 0 0 3px rgba(96,165,250,.22), 0 4px 10px rgba(0,0,0,.35);
box-shadow: var(--ring3-22-shadow);
}
.chip-btn:active,
a.chip-btn:active {
transform: translateY(1px);
box-shadow: 0 0 0 2px rgba(96,165,250,.20), 0 2px 6px rgba(0,0,0,.35);
box-shadow: var(--ring2-20-shadow);
}
/* Header inputs and selects share one visual row with the chips */
@@ -297,7 +380,7 @@ a.chip-btn:active {
}
.top-input:focus {
border-color: var(--accent-2);
box-shadow: 0 0 0 3px rgba(96,165,250,.20);
box-shadow: var(--focus-ring3-20);
}
/* Inner headings inside the node block */
@@ -322,7 +405,7 @@ a.chip-btn:active {
box-shadow: 0 2px 6px rgba(0,0,0,.35);
cursor: pointer;
opacity: .85;
transition: transform .12s ease, opacity .12s ease, box-shadow .12s ease, border-color .12s ease, background-color .12s ease;
transition: var(--tr-base), var(--tr-pop), opacity .12s ease;
}
.drawflow .connection:hover foreignObject,
.drawflow .connection:hover [class*="remove"],
@@ -331,7 +414,7 @@ a.chip-btn:active {
opacity: 1;
transform: scale(1.05);
border-color: var(--accent-2);
box-shadow: 0 0 0 3px rgba(96,165,250,.20), 0 4px 10px rgba(0,0,0,.35);
box-shadow: var(--ring3-20-shadow);
}
/* If delete control is rendered inside foreignObject, normalize inner box */
.drawflow .connection foreignObject div,
@@ -685,7 +768,7 @@ a.chip-btn:active {
/* Port hover affordance (no heavy effects) */
.drawflow .drawflow-node .inputs .input,
.drawflow .drawflow-node .outputs .output {
transition: transform .08s ease;
transition: var(--tr-pop-fast);
will-change: transform;
}
.drawflow .drawflow-node .inputs .input:hover,
@@ -712,18 +795,18 @@ a.chip-btn:active {
box-shadow: 0 2px 6px rgba(0,0,0,.35) !important;
cursor: pointer !important;
z-index: 10 !important;
transition: transform .12s ease, box-shadow .12s ease, background-color .12s ease, border-color .12s ease, color .12s ease !important;
transition: var(--tr-base), var(--tr-pop) !important;
}
.drawflow .drawflow-node .close:hover {
transform: scale(1.06) !important;
background: #1f2937 !important;
border-color: var(--accent-2) !important;
color: #f8fafc !important;
box-shadow: 0 0 0 3px rgba(96,165,250,.22), 0 4px 10px rgba(0,0,0,.35) !important;
box-shadow: var(--ring3-22-shadow) !important;
}
.drawflow .drawflow-node .close:active {
transform: scale(0.98) !important;
box-shadow: 0 0 0 2px rgba(96,165,250,.20), 0 2px 6px rgba(0,0,0,.35) !important;
box-shadow: var(--ring2-20-shadow) !important;
}
/* Drawflow floating delete handle (class: .drawflow-delete) — restyle but keep behavior */
#drawflow .drawflow-delete,
@@ -741,7 +824,7 @@ a.chip-btn:active {
box-shadow: 0 2px 6px rgba(0,0,0,.35) !important;
cursor: pointer !important;
z-index: 1000 !important;
transition: transform .12s ease, box-shadow .12s ease, background-color .12s ease, border-color .12s ease !important;
transition: var(--tr-base), var(--tr-pop) !important;
}
#drawflow .drawflow-delete::before,
.drawflow-delete::before {
@@ -757,7 +840,7 @@ a.chip-btn:active {
transform: translate(-50%, -50%) scale(1.06) !important;
background: #1f2937 !important;
border-color: var(--accent-2) !important;
box-shadow: 0 0 0 3px rgba(96,165,250,.22), 0 4px 10px rgba(0,0,0,.35) !important;
box-shadow: var(--ring3-22-shadow) !important;
}
#drawflow .drawflow-delete:active,
.drawflow-delete:active {
@@ -766,7 +849,7 @@ a.chip-btn:active {
/* Execution highlight states (SSE-driven) */
.drawflow .drawflow-node .title-box,
.drawflow .drawflow-node .box {
transition: border-color .12s ease, box-shadow .12s ease, background-color .12s ease;
transition: var(--tr-base);
}
.drawflow .drawflow-node.node-running .title-box,
@@ -808,16 +891,6 @@ a.chip-btn:active {
transform: translate(-50%, -100%);
z-index: 1000; /* above nodes/edges but below menus */
}
/* Remove scrollbars from the Drawflow container so they do not cover the right arrow */
#drawflow {
overflow: hidden !important;
position: relative;
z-index: 1; /* keep the canvas visible below the HUD and above the background */
/* Stretch the Drawflow container across the whole center column */
width: 100%;
height: 100%;
display: block;
}
/* Panels collapse controls and layout */
#container.collapse-left {
@@ -1002,11 +1075,7 @@ select#pm-role {
outline: none;
font: 12px/1 Inter, system-ui, Arial, sans-serif;
transition:
border-color .12s ease,
box-shadow .12s ease,
background-color .12s ease,
color .12s ease;
transition: var(--tr-base);
}
/* Hover and focus states consistent with .top-input */
@@ -1025,7 +1094,7 @@ select#vars-scope:focus,
select.v-mode:focus,
select#pm-role:focus {
border-color: var(--accent-2);
box-shadow: 0 0 0 3px rgba(96,165,250,.20);
box-shadow: var(--focus-ring3-20);
}
/* Compact width contexts: keep natural width unless container forces 100% */
@@ -1202,4 +1271,334 @@ header { position: relative; }
}
#inspector .var-row .v-del {
flex: 0 0 auto;
}
/* --- Wire labels and arrows overlay --- */
#wire-labels {
position: absolute;
inset: 0;
pointer-events: none;
z-index: 4; /* above the wires, below the panels */
}
.wire-label {
position: absolute;
transform: translate(-50%, -50%);
background: #10151c;
color: #e5e7eb;
border: 1px solid rgba(148,163,184,.35);
border-radius: 6px;
padding: 1px 4px;
font: 10px/1.2 Inter, system-ui, Arial, sans-serif;
white-space: nowrap;
opacity: .9;
user-select: none;
}
.wire-arrow {
position: absolute;
width: 0;
height: 0;
border-left: 6px solid transparent;
border-right: 6px solid transparent;
border-top: 8px solid var(--connector); /* recolored inline to match the wire color */
transform-origin: 50% 70%;
opacity: .95;
}
/* Dim unrelated connections while a node is focused */
.drawflow .connection.dim .main-path {
opacity: .35 !important;
}
/* --- Collapsible preview blocks inside nodes --- */
.np-coll { margin: 4px 0; }
.np-coll > summary {
list-style: none;
cursor: pointer;
color: var(--muted);
font-size: 10px;
margin: 4px 0 2px;
}
.np-coll > summary::-webkit-details-marker { display: none; }
.np-coll[open] > summary { color: #cbd5e1; }
/* groups overlay removed */
/* --- Canvas preview sanitization: hide hints/labels/checkboxes (only on canvas node previews) --- */
/* Hide visual hints, labels, and checkboxes only inside node previews on the canvas.
Summary sections (headers/template) stay visible; textarea/inputs keep showing their values. */
#canvas .drawflow .drawflow-node .node-preview .hint,
#canvas .drawflow .drawflow-node .node-preview label,
#canvas .drawflow .drawflow-node .node-preview input[type="checkbox"] {
display: none !important;
}
/* --- Unified checkbox style across UI --- */
/* Unified dark checkbox style for the project theme (accent: var(--accent-2)).
Applied across the whole UI (inspector, "Запуск" (Run) panel, Prompt Blocks, STORE panel, etc.).
In canvas previews, checkboxes are hidden by the block above. */
input[type="checkbox"] {
-webkit-appearance: none;
appearance: none;
width: 16px;
height: 16px;
display: inline-block;
vertical-align: -2px;
border: 1px solid #334155;
border-radius: 4px;
background: #0f141a;
box-shadow: 0 0 0 0 rgba(96,165,250,0.0);
transition:
background-color .12s ease,
border-color .12s ease,
box-shadow .12s ease,
transform .06s ease;
cursor: pointer;
}
input[type="checkbox"]:hover {
background: #121820;
border-color: var(--accent-2);
box-shadow: 0 0 0 3px rgba(96,165,250,.18);
}
input[type="checkbox"]:active {
transform: scale(0.96);
}
input[type="checkbox"]:checked {
border-color: var(--accent-2);
background-color: #0f141a;
background-image: url("data:image/svg+xml;utf8,&lt;svg xmlns='http://www.w3.org/2000/svg' width='14' height='14' viewBox='0 0 24 24' fill='none' stroke='%2360a5fa' stroke-width='3' stroke-linecap='round' stroke-linejoin='round'&gt;&lt;polyline points='20 6 9 17 4 12'/&gt;&lt;/svg&gt;");
background-repeat: no-repeat;
background-position: center;
background-size: 12px 12px;
}
input[type="checkbox"]:focus-visible {
outline: none;
border-color: var(--accent-2);
box-shadow: var(--focus-ring3-22);
}
input[type="checkbox"]:disabled {
opacity: .6;
cursor: not-allowed;
box-shadow: none;
}
/* --- Enhanced checkbox visual: add glowing blue dot at center --- */
/* Applies to every checkbox in the UI (inspector, "Запуск" (Run), Prompt Blocks, STORE, etc.).
In node previews on the canvas, checkboxes are hidden by the rule added earlier. */
input[type="checkbox"] {
position: relative; /* to center the pseudo-element */
overflow: visible; /* safe for the glow */
}
input[type="checkbox"]::after {
content: "";
position: absolute;
left: 50%;
top: 50%;
width: 6px;
height: 6px;
border-radius: 999px;
background: var(--accent-2);
transform: translate(-50%, -50%) scale(0.6);
opacity: .6;
/* soft blue glow at rest */
box-shadow:
0 0 4px rgba(96,165,250,.45),
0 0 10px rgba(96,165,250,.25);
transition:
transform .12s ease,
opacity .12s ease,
box-shadow .12s ease;
}
input[type="checkbox"]:checked::after {
transform: translate(-50%, -50%) scale(1.0);
opacity: 1;
/* stronger glow when checked */
box-shadow:
0 0 6px rgba(96,165,250,.80),
0 0 14px rgba(96,165,250,.60),
0 0 24px rgba(96,165,250,.35);
}
input[type="checkbox"]:disabled::after {
opacity: .35;
box-shadow: 0 0 2px rgba(96,165,250,.25);
}
/* --- Unified number input style across UI --- */
/* One style for every input[type=number], including the inspector, "Запуск" (Run), SERVICE panels, etc. */
input[type="number"] {
width: 100%;
background: #0f141a;
color: #e5e7eb;
border: 1px solid #2b3646;
border-radius: 8px;
padding: 6px 8px;
height: 32px;
box-sizing: border-box;
font: 12px/1 Inter, system-ui, Arial, sans-serif;
transition: var(--tr-base);
}
input[type="number"]:hover {
background: #121820;
border-color: var(--accent-2);
}
input[type="number"]:focus {
outline: none;
border-color: var(--accent-2);
box-shadow: var(--focus-ring3-20);
}
input[type="number"]:disabled {
opacity: .6;
cursor: not-allowed;
box-shadow: none;
}
/* Remove the native spinner arrows so the style is uniform across browsers */
input[type="number"]::-webkit-outer-spin-button,
input[type="number"]::-webkit-inner-spin-button {
-webkit-appearance: none;
margin: 0;
}
input[type="number"] {
-moz-appearance: textfield;
}
/* --- Canvas preview sanitization (reminder): hints/labels/checkboxes are hidden in previews --- */
/* Summary sections (headers/template) remain visible */
/* --- Logs panel: base layout ------------------------------------------------- */
#logs-list { --log-border: #1f2b3b; }
#logs-list .logs-row {
padding: 8px 10px;
border-bottom: 1px solid var(--log-border);
background: #0f141a;
cursor: pointer;
transition: background-color .15s ease, box-shadow .15s ease, opacity .2s ease;
}
#logs-list .logs-row:hover { background: #111821; }
#logs-list .logs-row.selected {
outline: 0;
box-shadow: inset 0 0 0 2px color-mix(in srgb, var(--accent-2) 40%, transparent);
}
#logs-list .logs-row.dim { opacity: .70; }
#logs-list .logs-row .title { font-size: 13px; }
#logs-list .logs-row .sub {
font-size: 11px;
opacity: .85;
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;
}
/* --- HTTP status styling ----------------------------------------------------- */
/* Shimmer animation for pending HTTP rows */
@keyframes logs-shimmer {
0% { background-position: -200% 0; }
100% { background-position: 200% 0; }
}
/* Pending request (no response yet): blue accent shimmer using --accent-2 */
#logs-list .logs-row.kind-http.http-pending {
border-left: 3px solid var(--accent-2);
background:
linear-gradient(90deg,
color-mix(in srgb, var(--accent-2) 10%, transparent) 0%,
color-mix(in srgb, var(--accent-2) 20%, transparent) 50%,
color-mix(in srgb, var(--accent-2) 10%, transparent) 100%);
background-size: 200% 100%;
animation: logs-shimmer 1.4s linear infinite;
}
/* Success/Failure borders for completed HTTP rows */
#logs-list .logs-row.kind-http.http-ok { border-left: 3px solid #10b981; } /* emerald-500 */
#logs-list .logs-row.kind-http.http-err { border-left: 3px solid #ef4444; } /* red-500 */
/* --- Node sleep pulse -------------------------------------------------------- */
@keyframes logs-sleep-pulse {
0% { box-shadow: inset 0 0 0 0 rgba(245,158,11, 0.00); }
50% { box-shadow: inset 0 0 0 2px rgba(245,158,11, 0.35); }
100% { box-shadow: inset 0 0 0 0 rgba(245,158,11, 0.00); }
}
#logs-list .logs-row.kind-node.ev-sleep {
border-left: 3px dashed #f59e0b; /* amber-500 */
animation: logs-sleep-pulse 1.8s ease-in-out infinite;
}
/* --- Node "water" running effect (blue→violet gradient, slow, saturated) ----- */
@keyframes node-water {
0% { background-position: 0% 0%; }
50% { background-position: 100% 100%; }
100% { background-position: 0% 0%; }
}
/* Applied while a node is executing (the .node-running class is set from SSE) */
#drawflow .drawflow-node.node-running .title-box,
#drawflow .drawflow-node.node-running .box {
/* Base: our standard node background, with a saturated "water" gradient on top */
background:
linear-gradient(125deg,
color-mix(in srgb, #60a5fa 78%, transparent) 0%,
color-mix(in srgb, #7c3aed 56%, transparent) 50%,
color-mix(in srgb, #60a5fa 78%, transparent) 100%),
var(--node);
background-size: 360% 360%;
animation: node-water 5.0s ease-in-out infinite; /* slower and denser, like a calm water surface */
border-color: color-mix(in srgb, var(--accent-2) 55%, #7c3aed 45%);
/* A light glow to underline activity, without being garish */
box-shadow:
0 0 0 2px color-mix(in srgb, var(--accent-2) 30%, transparent),
0 0 16px rgba(96,165,250,.18),
inset 0 0 22px rgba(167,139,250,.12);
}
/* Safety: on success/error the temporary classes may override the border */
#drawflow .drawflow-node.node-ok .title-box,
#drawflow .drawflow-node.node-ok .box {
border-color: #10b981 !important; /* emerald */
box-shadow:
0 0 0 2px color-mix(in srgb, #10b981 35%, transparent),
0 0 12px rgba(16,185,129,.18);
background-image: none; /* drop the "water" once finished */
}
#drawflow .drawflow-node.node-err .title-box,
#drawflow .drawflow-node.node-err .box {
border-color: #ef4444 !important; /* red */
box-shadow:
0 0 0 2px color-mix(in srgb, #ef4444 35%, transparent),
0 0 12px rgba(239,68,68,.18);
background-image: none; /* drop the "water" after an error */
}
/* --- Water overlay: full-node coverage with fade-out on stop ------------------ */
/* Base: a transparent gradient overlay over the ENTIRE .drawflow_content_node
that smoothly changes its opacity. While the node is active (.node-running)
we raise the opacity and move the "wave". When .node-running is removed,
the overlay fades out by itself thanks to the transition on opacity. */
#drawflow .drawflow-node .drawflow_content_node {
position: relative;
overflow: hidden;
z-index: 0; /* so ::before can be raised above it */
}
#drawflow .drawflow-node .drawflow_content_node::before {
content: '';
position: absolute;
inset: 0;
z-index: 1; /* above the node content, but click-through */
pointer-events: none;
border-radius: 10px;
background: linear-gradient(125deg,
color-mix(in srgb, #60a5fa 78%, transparent) 0%,
color-mix(in srgb, #7c3aed 56%, transparent) 50%,
color-mix(in srgb, #60a5fa 78%, transparent) 100%);
background-size: 360% 360%;
opacity: 0; /* invisible by default */
transition: opacity 1.4s ease-in-out; /* fade-out on stop */
}
#drawflow .drawflow-node.node-running .drawflow_content_node::before {
opacity: .42; /* saturated yet readable; fades smoothly when the class is removed */
animation: node-water 5.0s ease-in-out infinite; /* slow water surface */
}
/* Disable the old per-part "water" animation, keeping the full-node overlay */
#drawflow .drawflow-node.node-running .title-box,
#drawflow .drawflow-node.node-running .box {
background: var(--node) !important;
animation: none !important;
}
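The stroke-width formulas above divide by var(--zoom, 1), which only works if the editor mirrors its scale into that CSS variable. A minimal sketch of that wiring, assuming a #canvas element and Drawflow's 'zoom' event (verify the event name against your Drawflow version; this snippet is not part of the diff):
(function (w) {
'use strict';
// Mirror the editor scale into the CSS var consumed by the clamp() formulas
const canvas = document.getElementById('canvas');
function syncZoom(z) {
try { if (canvas) canvas.style.setProperty('--zoom', String(z || 1)); } catch (_) {}
}
try { w.editor && w.editor.on && w.editor.on('zoom', syncZoom); } catch (_) {}
try { syncZoom(w.editor && w.editor.zoom); } catch (_) {} // initial value
})(window);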

File diff suppressed because it is too large

View File

@@ -4,6 +4,12 @@
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<title>НадTavern</title>
<link rel="icon" href="/favicon.ico" />
<link rel="icon" type="image/png" sizes="32x32" href="/favicon-32x32.png" />
<link rel="icon" type="image/png" sizes="16x16" href="/favicon-16x16.png" />
<link rel="apple-touch-icon" href="/apple-touch-icon.png" />
<link rel="manifest" href="/site.webmanifest" />
<meta name="theme-color" content="#ffffff" />
<style>
body { font-family: Arial, sans-serif; margin: 24px; }
textarea { width: 100%; height: 200px; }

View File

@@ -21,10 +21,15 @@
// Prepare the new data with a deep copy of blocks
const newData = { ...(n.data || {}), blocks: Array.isArray(d2.blocks) ? d2.blocks.map(b => ({ ...b })) : [] };
// 1) Update Drawflow's internal state so export() returns up-to-date data
try { editor.updateNodeDataFromId(id, newData); } catch (e) {}
// 2) Update the DOM mirror (the source of truth for toPipelineJSON)
const el2 = document.querySelector(`#node-${id}`);
if (el2) el2.__data = JSON.parse(JSON.stringify(newData));
try {
if (w.AU && typeof w.AU.updateNodeDataAndDom === 'function') {
w.AU.updateNodeDataAndDom(editor, id, newData);
} else {
editor.updateNodeDataFromId(id, newData);
const el2 = document.querySelector(`#node-${id}`);
if (el2) el2.__data = JSON.parse(JSON.stringify(newData));
}
} catch (e) {}
} catch (e) {}
}
// Initial sync to attach blocks into __data for toPipelineJSON
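When utils.js is guaranteed to load first, the guarded branch above collapses to a single helper call; a minimal sketch with the same editor/id/newData:
// Direct call, assuming AU is present (same effect as the fallback branch)
w.AU.updateNodeDataAndDom(editor, id, newData); // syncs Drawflow state and #node-<id>.__data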

View File

@@ -0,0 +1,158 @@
/* global window */
(function (w) {
'use strict';
// Centralized registry for provider-specific defaults (base_url, endpoint, headers, template)
// Exposes window.ProviderTemplates with:
// .register(name, { defaultConfig: () => ({ base_url, endpoint, headers, template }) })
// .defaults(provider)
// .ensureConfigs(nodeData)
// .getActiveProv(nodeData)
// .getActiveCfg(nodeData)
// .providers()
const PT = {};
const _registry = new Map();
function norm(p) {
return String(p == null ? 'openai' : p).toLowerCase().trim();
}
PT.register = function register(name, def) {
const key = norm(name);
if (!def || typeof def.defaultConfig !== 'function') {
throw new Error('ProviderTemplates.register: def.defaultConfig() required');
}
_registry.set(key, { defaultConfig: def.defaultConfig });
};
PT.providers = function providers() {
return Array.from(_registry.keys());
};
PT.defaults = function defaults(provider) {
const key = norm(provider);
const rec = _registry.get(key);
if (rec && typeof rec.defaultConfig === 'function') {
try { return rec.defaultConfig(); } catch (_) {}
}
return { base_url: '', endpoint: '', headers: `{}`, template: `{}` };
};
PT.ensureConfigs = function ensureConfigs(d) {
if (!d) return;
if (!d.provider) d.provider = 'openai';
if (!d.provider_configs || typeof d.provider_configs !== 'object') d.provider_configs = {};
for (const p of PT.providers()) {
if (!d.provider_configs[p]) d.provider_configs[p] = PT.defaults(p);
}
};
PT.getActiveProv = function getActiveProv(d) {
return norm(d && d.provider);
};
PT.getActiveCfg = function getActiveCfg(d) {
PT.ensureConfigs(d);
const p = PT.getActiveProv(d);
return d && d.provider_configs ? (d.provider_configs[p] || {}) : {};
};
// --- Built-in providers (default presets) ---
// Templates mirror original editor.html logic; use macros [[...]] and {{ ... }} as-is.
function T_OPENAI() { return `{
"model": "{{ model }}",
[[PROMPT]],
"temperature": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},
"top_p": {{ incoming.json.top_p|default(params.top_p|default(1)) }},
"max_tokens": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},
"max_completion_tokens": {{ incoming.json.max_completion_tokens|default(params.max_tokens|default(256)) }},
"presence_penalty": {{ incoming.json.presence_penalty|default(0) }},
"frequency_penalty": {{ incoming.json.frequency_penalty|default(0) }},
"stop": {{ incoming.json.stop|default(params.stop|default([])) }},
"stream": {{ incoming.json.stream|default(false) }}
}`; }
function T_GEMINI() { return `{
"model": "{{ model }}",
[[PROMPT]],
"safetySettings": {{ incoming.json.safetySettings|default([]) }},
"generationConfig": {
"temperature": {{ incoming.json.generationConfig.temperature|default(params.temperature|default(0.7)) }},
"topP": {{ incoming.json.generationConfig.topP|default(params.top_p|default(1)) }},
"maxOutputTokens": {{ incoming.json.generationConfig.maxOutputTokens|default(params.max_tokens|default(256)) }},
"stopSequences": {{ incoming.json.generationConfig.stopSequences|default(params.stop|default([])) }},
"candidateCount": {{ incoming.json.generationConfig.candidateCount|default(1) }},
"thinkingConfig": {
"includeThoughts": {{ incoming.json.generationConfig.thinkingConfig.includeThoughts|default(false) }},
"thinkingBudget": {{ incoming.json.generationConfig.thinkingConfig.thinkingBudget|default(0) }}
}
}
}`; }
function T_GEMINI_IMAGE() { return `{
"model": "{{ model }}",
[[PROMPT]]
}`; }
function T_CLAUDE() { return `{
"model": "{{ model }}",
[[PROMPT]],
"temperature": {{ incoming.json.temperature|default(params.temperature|default(0.7)) }},
"top_p": {{ incoming.json.top_p|default(params.top_p|default(1)) }},
"max_tokens": {{ incoming.json.max_tokens|default(params.max_tokens|default(256)) }},
"stop_sequences": {{ incoming.json.stop_sequences|default(params.stop|default([])) }},
"stream": {{ incoming.json.stream|default(false) }},
"thinking": {
"type": "{{ incoming.json.thinking.type|default('disabled') }}",
"budget_tokens": {{ incoming.json.thinking.budget_tokens|default(0) }}
},
"anthropic_version": "{{ anthropic_version|default('2023-06-01') }}"
}`; }
// Register built-ins
PT.register('openai', {
defaultConfig: () => ({
base_url: 'https://api.openai.com',
endpoint: '/v1/chat/completions',
headers: `{"Authorization":"Bearer [[VAR:incoming.headers.authorization]]"}`,
template: T_OPENAI()
})
});
PT.register('gemini', {
defaultConfig: () => ({
base_url: 'https://generativelanguage.googleapis.com',
endpoint: '/v1beta/models/{{ model }}:generateContent?key=[[VAR:incoming.api_keys.key]]',
headers: `{}`,
template: T_GEMINI()
})
});
PT.register('gemini_image', {
defaultConfig: () => ({
base_url: 'https://generativelanguage.googleapis.com',
endpoint: '/v1beta/models/{{ model }}:generateContent',
headers: `{"x-goog-api-key":"[[VAR:incoming.api_keys.key]]"}`,
template: T_GEMINI_IMAGE()
})
});
PT.register('claude', {
defaultConfig: () => ({
base_url: 'https://api.anthropic.com',
endpoint: '/v1/messages',
headers: `{"x-api-key":"[[VAR:incoming.headers.x-api-key]]","anthropic-version":"2023-06-01","anthropic-beta":"[[VAR:incoming.headers.anthropic-beta]]"}`,
template: T_CLAUDE()
})
});
try { console.debug('[ProviderTemplates] providers:', PT.providers()); } catch (_) {}
// Export globals and compatibility shims
try {
w.ProviderTemplates = PT;
// Back-compat shims so existing code can call global helpers
w.providerDefaults = PT.defaults;
w.ensureProviderConfigs = PT.ensureConfigs;
w.getActiveProv = PT.getActiveProv;
w.getActiveCfg = PT.getActiveCfg;
} catch (_) {}
})(window);
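A usage sketch for the registry above; the 'my_proxy' provider and its values are hypothetical, only the ProviderTemplates API defined in this file is assumed:
// Register a custom preset, then read back the active config for a node
window.ProviderTemplates.register('my_proxy', {
defaultConfig: () => ({
base_url: 'http://localhost:8080', // hypothetical upstream
endpoint: '/v1/chat/completions',
headers: `{"Authorization":"Bearer [[VAR:incoming.headers.authorization]]"}`,
template: `{ "model": "{{ model }}", [[PROMPT]] }`
})
});
const d = { provider: 'my_proxy' };
const cfg = window.ProviderTemplates.getActiveCfg(d); // ensureConfigs() fills provider_configs lazily
console.debug(cfg.base_url, window.ProviderTemplates.providers());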

View File

@@ -12,7 +12,8 @@
// Top-level pipeline meta kept in memory and included into JSON on save.
// Allows UI to edit loop parameters without manual JSON edits.
let _pipelineMeta = {
// DRY: a single source for meta defaults and normalization
const MetaDefaults = Object.freeze({
id: 'pipeline_editor',
name: 'Edited Pipeline',
parallel_limit: 8,
@@ -20,19 +21,74 @@
loop_max_iters: 1000,
loop_time_budget_ms: 10000,
clear_var_store: true,
// New: default HTTP timeout for upstream requests (seconds)
http_timeout_sec: 60,
// New (v1): text-extraction strategy for [[OUTx]] (global default)
// auto | deep | openai | gemini | claude | jsonpath
text_extract_strategy: 'auto',
// Used with the jsonpath strategy (dot notation, index support: a.b.0.c)
text_extract_json_path: '',
// Separator used when joining an array of results
text_join_sep: '\n',
// v2: collection of text-extraction presets, managed in the "Запуск" (Run) panel
// [{ id, name, strategy, json_path, join_sep }]
text_extract_presets: [],
};
});
let _pipelineMeta = { ...MetaDefaults };
// Meta normalizer: coerces types, supports key synonyms, fills in defaults
function ensureMeta(p) {
const src = (p && typeof p === 'object') ? p : {};
const out = { ...MetaDefaults };
// helpers
const toInt = (v, def) => {
try {
const n = parseInt(v, 10);
return Number.isFinite(n) && n > 0 ? n : def;
} catch { return def; }
};
const toNum = (v, def) => {
try {
const n = parseFloat(v);
return !Number.isNaN(n) && n > 0 ? n : def;
} catch { return def; }
};
// base fields
try { out.id = String((src.id ?? out.id) || out.id); } catch {}
try { out.name = String((src.name ?? out.name) || out.name); } catch {}
out.parallel_limit = toInt(src.parallel_limit, out.parallel_limit);
out.loop_mode = String((src.loop_mode ?? out.loop_mode) || out.loop_mode);
out.loop_max_iters = toInt(src.loop_max_iters, out.loop_max_iters);
out.loop_time_budget_ms = toInt(src.loop_time_budget_ms, out.loop_time_budget_ms);
out.clear_var_store = (typeof src.clear_var_store === 'boolean') ? !!src.clear_var_store : out.clear_var_store;
out.http_timeout_sec = toNum(src.http_timeout_sec, out.http_timeout_sec);
out.text_extract_strategy = String((src.text_extract_strategy ?? out.text_extract_strategy) || out.text_extract_strategy);
out.text_extract_json_path = String((src.text_extract_json_path ?? out.text_extract_json_path) || out.text_extract_json_path);
// support synonyms of text_join_sep (case and spelling variants)
let joinSep = out.text_join_sep;
try {
for (const k of Object.keys(src)) {
if (String(k).toLowerCase() === 'text_join_sep') { joinSep = src[k]; break; }
}
} catch {}
out.text_join_sep = String((joinSep ?? src.text_join_sep ?? out.text_join_sep) || out.text_join_sep);
// presets collection
try {
const arr = Array.isArray(src.text_extract_presets) ? src.text_extract_presets : [];
out.text_extract_presets = arr
.filter(it => it && typeof it === 'object')
.map((it, idx) => ({
id: String((it.id ?? '') || ('p' + Date.now().toString(36) + Math.random().toString(36).slice(2) + idx)),
name: String(it.name ?? (it.json_path || 'Preset')),
strategy: String(it.strategy ?? 'auto'),
json_path: String(it.json_path ?? ''),
join_sep: String(it.join_sep ?? '\n'),
}));
} catch { out.text_extract_presets = []; }
return out;
}
function getPipelineMeta() {
return { ..._pipelineMeta };
@@ -40,48 +96,8 @@
function updatePipelineMeta(p) {
if (!p || typeof p !== 'object') return;
const keys = [
'id','name','parallel_limit','loop_mode','loop_max_iters','loop_time_budget_ms','clear_var_store','http_timeout_sec',
'text_extract_strategy','text_extract_json_path','text_join_sep','text_join_sep','text_join_SEP',
// v2 presets collection
'text_extract_presets'
];
for (const k of keys) {
if (Object.prototype.hasOwnProperty.call(p, k) && p[k] !== undefined && p[k] !== null && (k === 'clear_var_store' ? true : p[k] !== '')) {
if (k === 'parallel_limit' || k === 'loop_max_iters' || k === 'loop_time_budget_ms') {
const v = parseInt(p[k], 10);
if (!Number.isNaN(v) && v > 0) _pipelineMeta[k] = v;
} else if (k === 'http_timeout_sec') {
const fv = parseFloat(p[k]);
if (!Number.isNaN(fv) && fv > 0) _pipelineMeta[k] = fv;
} else if (k === 'clear_var_store') {
_pipelineMeta[k] = !!p[k];
} else {
// special handling for the presets collection
if (k === 'text_extract_presets') {
try {
const arr = Array.isArray(p[k]) ? p[k] : [];
_pipelineMeta[k] = arr
.filter(it => it && typeof it === 'object')
.map(it => ({
id: String((it.id ?? '') || ('p' + Date.now().toString(36) + Math.random().toString(36).slice(2))),
name: String(it.name ?? 'Preset'),
strategy: String(it.strategy ?? 'auto'),
json_path: String(it.json_path ?? ''),
join_sep: String(it.join_sep ?? '\n'),
}));
} catch (_) {
_pipelineMeta[k] = [];
}
} else if (k.toLowerCase() === 'text_join_sep') {
// normalize the join-separator key (different spellings are accepted)
_pipelineMeta['text_join_sep'] = String(p[k]);
} else {
_pipelineMeta[k] = String(p[k]);
}
}
}
}
// DRY: a single normalization entry point
_pipelineMeta = ensureMeta({ ..._pipelineMeta, ...p });
}
// Drawflow -> pipeline JSON
@@ -260,24 +276,10 @@
}
}
// 3) Assemble the final pipeline JSON with metadata
const meta = getPipelineMeta();
return {
id: meta.id || 'pipeline_editor',
name: meta.name || 'Edited Pipeline',
parallel_limit: (typeof meta.parallel_limit === 'number' ? meta.parallel_limit : 8),
loop_mode: (meta.loop_mode || 'dag'),
loop_max_iters: (typeof meta.loop_max_iters === 'number' ? meta.loop_max_iters : 1000),
loop_time_budget_ms: (typeof meta.loop_time_budget_ms === 'number' ? meta.loop_time_budget_ms : 10000),
clear_var_store: (typeof meta.clear_var_store === 'boolean' ? meta.clear_var_store : true),
http_timeout_sec: (typeof meta.http_timeout_sec === 'number' ? meta.http_timeout_sec : 60),
text_extract_strategy: (meta.text_extract_strategy || 'auto'),
text_extract_json_path: (meta.text_extract_json_path || ''),
text_join_sep: (meta.text_join_sep || '\n'),
// v2: persist presets
text_extract_presets: (Array.isArray(meta.text_extract_presets) ? meta.text_extract_presets : []),
nodes
};
// 3) Assemble the final pipeline JSON with (normalized) metadata
const meta = ensureMeta(getPipelineMeta());
try { console.debug('[AgentUISer.toPipelineJSON] meta_keys', Object.keys(meta || {})); } catch (e) {}
return { ...meta, nodes };
}
// pipeline JSON -> Drawflow
@@ -285,25 +287,25 @@
ensureDeps();
const editor = w.editor;
const NODE_IO = w.NODE_IO;
// Persist pipeline metadata for the UI
try {
updatePipelineMeta({
id: p && p.id ? p.id : 'pipeline_editor',
name: p && p.name ? p.name : 'Edited Pipeline',
parallel_limit: (p && typeof p.parallel_limit === 'number') ? p.parallel_limit : 8,
loop_mode: p && p.loop_mode ? p.loop_mode : 'dag',
loop_max_iters: (p && typeof p.loop_max_iters === 'number') ? p.loop_max_iters : 1000,
loop_time_budget_ms: (p && typeof p.loop_time_budget_ms === 'number') ? p.loop_time_budget_ms : 10000,
clear_var_store: (p && typeof p.clear_var_store === 'boolean') ? p.clear_var_store : true,
http_timeout_sec: (p && typeof p.http_timeout_sec === 'number') ? p.http_timeout_sec : 60,
text_extract_strategy: (p && typeof p.text_extract_strategy === 'string') ? p.text_extract_strategy : 'auto',
text_extract_json_path: (p && typeof p.text_extract_json_path === 'string') ? p.text_extract_json_path : '',
text_join_sep: (p && typeof p.text_join_sep === 'string') ? p.text_join_sep : '\n',
// v2: presets from pipeline.json
text_extract_presets: (p && Array.isArray(p.text_extract_presets)) ? p.text_extract_presets : [],
});
} catch (e) {}
// Persist pipeline metadata for the UI (normalized end-to-end)
try {
updatePipelineMeta(p || {});
// Diagnostic log of the meta composition to confirm the DRY refactor
try {
const metaKeys = ["id","name","parallel_limit","loop_mode","loop_max_iters","loop_time_budget_ms","clear_var_store","http_timeout_sec","text_extract_strategy","text_extract_json_path","text_join_sep","text_extract_presets"];
const incomingKeys = metaKeys.filter(k => (p && Object.prototype.hasOwnProperty.call(p, k)));
const currentMeta = (typeof getPipelineMeta === 'function') ? getPipelineMeta() : {};
console.debug('[AgentUISer.fromPipelineJSON] meta_keys', {
incomingKeys,
resultKeys: Object.keys(currentMeta || {}),
metaPreview: {
id: currentMeta && currentMeta.id,
loop_mode: currentMeta && currentMeta.loop_mode,
http_timeout_sec: currentMeta && currentMeta.http_timeout_sec
}
});
} catch (_) {}
} catch (e) {}
editor.clear();
let x = 100; let y = 120; // Fallback
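For illustration, how the normalizer treats odd input (behavior read off ensureMeta above, starting from fresh defaults; this snippet is not part of the diff):
// Strings are coerced; non-positive numbers fall back to MetaDefaults
updatePipelineMeta({ parallel_limit: '4', http_timeout_sec: '30.5', loop_max_iters: -1 });
const m = getPipelineMeta();
// m.parallel_limit === 4, m.http_timeout_sec === 30.5, m.loop_max_iters === 1000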

213
static/js/utils.js Normal file
View File

@@ -0,0 +1,213 @@
/* global window */
// AgentUI common UI utilities (DRY helpers shared by editor.html and pm-ui.js)
(function (w) {
'use strict';
const AU = {};
// HTML escaping for safe text/attribute insertion
AU.escapeHtml = function escapeHtml(s) {
const str = String(s ?? '');
return str
.replace(/&/g, '&amp;')
.replace(/</g, '&lt;')
.replace(/>/g, '&gt;')
.replace(/"/g, '&quot;')
.replace(/'/g, '&#39;');
};
// Attribute-safe escape (keeps quotes escaped; conservative)
AU.escAttr = function escAttr(v) {
const s = String(v ?? '');
return s
.replace(/&/g, '&amp;')
.replace(/</g, '&lt;')
.replace(/>/g, '&gt;')
.replace(/"/g, '&quot;')
.replace(/'/g, '&#39;');
};
// Text-node escape (keeps quotes as-is for readability)
AU.escText = function escText(v) {
const s = String(v ?? '');
return s
.replace(/&/g, '&amp;')
.replace(/</g, '&lt;')
.replace(/>/g, '&gt;');
};
// DRY helper: sync Drawflow node data + mirror into DOM.__data with deep copy
AU.updateNodeDataAndDom = function updateNodeDataAndDom(editor, id, data) {
try { editor && typeof editor.updateNodeDataFromId === 'function' && editor.updateNodeDataFromId(id, data); } catch (_) {}
try {
const el = document.querySelector('#node-' + id);
if (el) el.__data = JSON.parse(JSON.stringify(data));
} catch (_) {}
};
// Double rAF helper: waits for two animation frames; returns Promise or accepts callback
AU.nextRaf2 = function nextRaf2(cb) {
try {
if (typeof requestAnimationFrame === 'function') {
if (typeof cb === 'function') {
requestAnimationFrame(() => { requestAnimationFrame(() => { try { cb(); } catch (_) {} }); });
return;
}
return new Promise((resolve) => requestAnimationFrame(() => requestAnimationFrame(() => resolve())));
} else {
if (typeof cb === 'function') { setTimeout(() => { try { cb(); } catch (_) {} }, 32); return; }
return new Promise((resolve) => setTimeout(resolve, 32));
}
} catch (_) {
if (typeof cb === 'function') { try { cb(); } catch (__) {} }
return Promise.resolve();
}
};
// Heuristic: looks like long base64 payload
AU.isProbablyBase64 = function isProbablyBase64(s) {
try {
if (typeof s !== 'string') return false;
if (s.length < 64) return false;
return /^[A-Za-z0-9+/=\r\n]+$/.test(s);
} catch { return false; }
};
AU.trimBase64 = function trimBase64(s, maxLen = 180) {
try {
const str = String(s ?? '');
if (str.length > maxLen) {
return str.slice(0, maxLen) + `... (trimmed ${str.length - maxLen})`;
}
return str;
} catch { return String(s ?? ''); }
};
// Flatten JSON-like object into [path, stringValue] pairs
// Includes special handling for backend preview objects: { "__truncated__": true, "preview": "..." }
AU.flattenObject = function flattenObject(obj, prefix = '') {
const out = [];
if (obj == null) return out;
if (typeof obj !== 'object') {
out.push([prefix, String(obj)]);
return out;
}
try {
const entries = Object.entries(obj);
for (const [k, v] of entries) {
const p = prefix ? `${prefix}.${k}` : k;
if (v && typeof v === 'object' && !Array.isArray(v)) {
// Special preview shape from backend
if (Object.prototype.hasOwnProperty.call(v, '__truncated__') && Object.prototype.hasOwnProperty.call(v, 'preview')) {
out.push([p, String(v.preview ?? '')]);
continue;
}
out.push(...AU.flattenObject(v, p));
} else {
try {
const s = (typeof v === 'string') ? v : JSON.stringify(v, null, 0);
out.push([p, s]);
} catch {
out.push([p, String(v)]);
}
}
}
} catch {
// Fallback best-effort
try { out.push([prefix, JSON.stringify(obj)]); } catch { out.push([prefix, String(obj)]); }
}
return out;
};
// Format headers dictionary into text lines "Key: Value"
AU.fmtHeaders = function fmtHeaders(h) {
try {
const keys = Object.keys(h || {});
return keys.map(k => `${k}: ${String(h[k])}`).join('\n');
} catch { return ''; }
};
// Build HTTP request preview text
AU.buildReqText = function buildReqText(x) {
if (!x) return '';
const head = `${x.method || 'POST'} ${x.url || '/'} HTTP/1.1`;
const host = (() => {
try { const u = new URL(x.url); return `Host: ${u.host}`; } catch { return ''; }
})();
const hs = AU.fmtHeaders(x.headers || {});
const body = String(x.body_text || '').trim();
return [head, host, hs, '', body].filter(Boolean).join('\n');
};
// Build HTTP response preview text
AU.buildRespText = function buildRespText(x) {
if (!x) return '';
const head = `HTTP/1.1 ${x.status || 0}`;
const hs = AU.fmtHeaders(x.headers || {});
const body = String(x.body_text || '').trim();
return [head, hs, '', body].filter(Boolean).join('\n');
};
// Unified fetch helper with timeout and JSON handling
AU.apiFetch = async function apiFetch(url, opts) {
const t0 = (typeof performance !== 'undefined' && performance.now) ? performance.now() : Date.now();
const o = opts || {};
const method = String(o.method || 'GET').toUpperCase();
const expectJson = (o.expectJson !== false); // default true
const headers = Object.assign({}, o.headers || {});
let body = o.body;
const timeoutMs = Number.isFinite(o.timeoutMs) ? o.timeoutMs : 15000;
const hasAbort = (typeof AbortController !== 'undefined');
const ctrl = hasAbort ? new AbortController() : null;
let to = null;
if (ctrl) {
try { to = setTimeout(() => { try { ctrl.abort(); } catch(_){} }, timeoutMs); } catch(_) {}
}
try {
if (expectJson) {
if (!headers['Accept'] && !headers['accept']) headers['Accept'] = 'application/json';
}
if (body != null) {
const isForm = (typeof FormData !== 'undefined' && body instanceof FormData);
const isBlob = (typeof Blob !== 'undefined' && body instanceof Blob);
if (typeof body === 'object' && !isForm && !isBlob) {
body = JSON.stringify(body);
if (!headers['Content-Type'] && !headers['content-type']) headers['Content-Type'] = 'application/json';
}
}
const res = await fetch(url, { method, headers, body, signal: ctrl ? ctrl.signal : undefined });
const ct = String(res.headers && res.headers.get ? (res.headers.get('Content-Type') || '') : '');
const isJsonCt = /application\/json/i.test(ct);
let data = null;
if (expectJson || isJsonCt) {
try { data = await res.json(); } catch (_) { data = null; }
} else {
try { data = await res.text(); } catch (_) { data = null; }
}
const t1 = (typeof performance !== 'undefined' && performance.now) ? performance.now() : Date.now();
try { console.debug('[AU.apiFetch]', { method, url, status: res.status, ms: Math.round(t1 - t0) }); } catch(_) {}
if (!res.ok) {
const msg = (data && typeof data === 'object' && data.error) ? String(data.error) : `HTTP ${res.status}`;
const err = new Error(`apiFetch: ${msg}`);
err.status = res.status;
err.data = data;
err.url = url;
throw err;
}
return data;
} finally {
if (to) { try { clearTimeout(to); } catch(_) {} }
}
};
// Expose
try { w.AU = AU; } catch (_) {}
try { w.nextRaf2 = AU.nextRaf2; } catch (_) {}
})(window);
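A usage sketch for the helpers above; the URL is hypothetical, only AU.apiFetch, AU.flattenObject, and AU.trimBase64 from this file are assumed:
(async () => {
try {
// POST JSON with a 5s timeout; plain objects are stringified automatically
const data = await window.AU.apiFetch('/api/pipelines/save', { method: 'POST', body: { id: 'pipeline_editor' }, timeoutMs: 5000 });
// Flatten the response into dot-path/value pairs for a quick preview
for (const [path, value] of window.AU.flattenObject(data)) {
console.debug(path, '=', window.AU.trimBase64(value));
}
} catch (e) {
console.warn('save failed:', e.status, e.message); // apiFetch errors carry .status/.data/.url
}
})();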

View File

@@ -4,6 +4,12 @@
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<title>НадTavern — Pipeline Editor (JSON)</title>
<link rel="icon" href="/favicon.ico" />
<link rel="icon" type="image/png" sizes="32x32" href="/favicon-32x32.png" />
<link rel="icon" type="image/png" sizes="16x16" href="/favicon-16x16.png" />
<link rel="apple-touch-icon" href="/apple-touch-icon.png" />
<link rel="manifest" href="/site.webmanifest" />
<meta name="theme-color" content="#ffffff" />
<style>
body { font-family: Arial, sans-serif; margin: 24px; }
textarea { width: 100%; height: 70vh; }

1
tests/__init__.py Normal file
View File

@@ -0,0 +1 @@
# Make tests a package so imports like "from tests.utils import ..." work.

199
tests/test_cancel_modes.py Normal file
View File

@@ -0,0 +1,199 @@
import asyncio
import json
from typing import Any, Dict
from agentui.pipeline.executor import PipelineExecutor, ExecutionError
from agentui.common.cancel import request_cancel, clear_cancel
import agentui.providers.http_client as hc
import agentui.pipeline.executor as ex
from tests.utils import ctx as _ctx
class DummyResponse:
def __init__(self, status: int, json_obj: Dict[str, Any]) -> None:
self.status_code = status
self._json = json_obj
self.headers = {}
try:
self.content = json.dumps(json_obj, ensure_ascii=False).encode("utf-8")
except Exception:
self.content = b"{}"
try:
self.text = json.dumps(json_obj, ensure_ascii=False)
except Exception:
self.text = "{}"
def json(self) -> Dict[str, Any]:
return self._json
class DummyClient:
"""
Async client with artificial delay to simulate in-flight HTTP that can be cancelled.
Provides .post() and .request() compatible with executor usage.
"""
def __init__(self, delay: float = 0.3, status_code: int = 200) -> None:
self._delay = delay
self._status = status_code
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
return False
async def post(self, url: str, content: bytes, headers: Dict[str, str]):
# Artificial delay to allow cancel/abort to happen while awaiting
await asyncio.sleep(self._delay)
try:
payload = json.loads(content.decode("utf-8"))
except Exception:
payload = {"_raw": content.decode("utf-8", errors="ignore")}
return DummyResponse(self._status, {"echo": payload})
async def request(self, method: str, url: str, headers: Dict[str, str], content: bytes | None):
return await self.post(url, content or b"{}", headers)
def _patch_http_client(delay: float = 0.3):
"""
Patch both providers.http_client.build_client and executor.build_client
to return our DummyClient with a given delay.
"""
orig_hc = hc.build_client
orig_ex = ex.build_client
hc.build_client = lambda timeout=60.0: DummyClient(delay=delay) # type: ignore[assignment]
ex.build_client = lambda timeout=60.0: DummyClient(delay=delay) # type: ignore[assignment]
return orig_hc, orig_ex
def _restore_http_client(orig_hc, orig_ex) -> None:
hc.build_client = orig_hc
ex.build_client = orig_ex
def test_graceful_cancel_while_providercall():
"""
Expectation:
- Cancel(mode=graceful) during in-flight HTTP should NOT interrupt the current request.
- While-wrapper should stop before starting next iteration.
- Final CYCLEINDEX__n2 == 0 (only first iteration finished), WAS_ERROR__n2 is False/absent.
"""
async def main():
p = {
"id": "p_cancel_soft",
"name": "ProviderCall graceful cancel",
"loop_mode": "dag",
"nodes": [
{
"id": "n2",
"type": "ProviderCall",
"config": {
"provider": "openai",
"while_expr": "cycleindex < 5",
"while_max_iters": 10,
# ignore_errors not needed for graceful (the in-flight request is not interrupted)
"provider_configs": {
"openai": {
"base_url": "http://dummy.local",
"headers": "{}",
"template": "{}"
}
}
},
"in": {}
}
]
}
pid = p["id"]
orig_hc, orig_ex = _patch_http_client(delay=0.3)
try:
ctx = _ctx()
exr = PipelineExecutor(p)
task = asyncio.create_task(exr.run(ctx))
# Give the node time to start HTTP, then request graceful cancel
await asyncio.sleep(0.05)
request_cancel(pid, mode="graceful")
out = await task
finally:
_restore_http_client(orig_hc, orig_ex)
try:
clear_cancel(pid)
except Exception:
pass
assert isinstance(out, dict)
vars_map = out.get("vars") or {}
assert isinstance(vars_map, dict)
# Only first iteration should have finished; last index = 0
assert vars_map.get("CYCLEINDEX__n2") == 0
# No error expected on graceful (we didn't interrupt the in-flight HTTP)
assert vars_map.get("WAS_ERROR__n2") in (False, None)
asyncio.run(main())
def test_abort_cancel_inflight_providercall():
"""
Expectation:
- Cancel(mode=abort) during in-flight HTTP cancels the await with ExecutionError.
- While-wrapper with ignore_errors=True converts it into {"result":{"error":...}}.
- Final CYCLEINDEX__n2 == 0 and WAS_ERROR__n2 == True; error mentions 'Cancelled by user (abort)'.
"""
async def main():
p = {
"id": "p_cancel_abort",
"name": "ProviderCall abort cancel",
"loop_mode": "dag",
"nodes": [
{
"id": "n2",
"type": "ProviderCall",
"config": {
"provider": "openai",
"while_expr": "cycleindex < 5",
"while_max_iters": 10,
"ignore_errors": True, # convert cancellation exception into error payload
"provider_configs": {
"openai": {
"base_url": "http://dummy.local",
"headers": "{}",
"template": "{}"
}
}
},
"in": {}
}
]
}
pid = p["id"]
orig_hc, orig_ex = _patch_http_client(delay=0.3)
try:
ctx = _ctx()
exr = PipelineExecutor(p)
task = asyncio.create_task(exr.run(ctx))
# Let HTTP start, then trigger hard abort
await asyncio.sleep(0.05)
request_cancel(pid, mode="abort")
out = await task
finally:
_restore_http_client(orig_hc, orig_ex)
try:
clear_cancel(pid)
except Exception:
pass
assert isinstance(out, dict)
vars_map = out.get("vars") or {}
assert isinstance(vars_map, dict)
# First iteration was started; after abort it is considered errored and loop stops
assert vars_map.get("CYCLEINDEX__n2") == 0
assert vars_map.get("WAS_ERROR__n2") is True
# Error propagated into node's result (ignore_errors=True path)
res = out.get("result") or {}
assert isinstance(res, dict)
err = res.get("error")
assert isinstance(err, str) and "Cancelled by user (abort)" in err
asyncio.run(main())

View File

@@ -1,25 +1,9 @@
import asyncio
import json
from agentui.pipeline.executor import PipelineExecutor, ExecutionError, Node, NODE_REGISTRY
from tests.utils import pp as _pp, base_ctx as _base_ctx
# Helper to pretty print short JSON safely
def _pp(obj, max_len=800):
try:
s = json.dumps(obj, ensure_ascii=False, indent=2)
except Exception:
s = str(obj)
if len(s) > max_len:
return s[:max_len] + "...<truncated>"
return s
def _base_ctx(vendor="openai"):
return {
"model": "gpt-x",
"vendor_format": vendor,
"params": {"temperature": 0.1},
"chat": {"last_user": "hi"},
"OUT": {},
}
async def scenario_if_single_quotes_ok():
print("\n=== SCENARIO 1: If with single quotes ===")

View File

@@ -1,33 +1,8 @@
import asyncio
import json
from agentui.pipeline.executor import PipelineExecutor
from agentui.pipeline.storage import clear_var_store
from tests.utils import pp as _pp, ctx as _ctx
def _pp(obj, max_len=800):
try:
s = json.dumps(obj, ensure_ascii=False, indent=2)
except Exception:
s = str(obj)
if len(s) > max_len:
return s[:max_len] + "...<truncated>"
return s
def _ctx(vendor="openai", incoming=None, params=None):
return {
"model": "gpt-x",
"vendor_format": vendor,
"params": params or {"temperature": 0.25},
"chat": {"last_user": "Привет"},
"OUT": {},
"incoming": incoming or {
"method": "POST",
"url": "http://localhost/test",
"path": "/test",
"query": "",
"headers": {"x": "X-HEADER"},
"json": {},
},
}
async def scenario_bare_vars_and_braces():
print("\n=== MACROS 1: Bare [[NAME]] и {{ NAME }} + числа/объекты без кавычек ===")
@@ -63,6 +38,7 @@ async def scenario_bare_vars_and_braces():
out = await PipelineExecutor(p).run(_ctx())
print("OUT:", _pp(out))
async def scenario_var_path_and_defaults():
print("\n=== MACROS 2: [[VAR:path]] и {{ ...|default(...) }} (вложенные и JSON-литералы) ===")
incoming = {
@@ -101,6 +77,7 @@ async def scenario_var_path_and_defaults():
out = await PipelineExecutor(p).run(_ctx(incoming=incoming, params={"temperature": 0.2}))
print("OUT:", _pp(out))
async def scenario_out_macros_full_and_short():
print("\n=== MACROS 3: [[OUT:nX...]] и короткая форма [[OUTx]] ===")
p = {
@@ -142,6 +119,7 @@ async def scenario_out_macros_full_and_short():
out = await PipelineExecutor(p).run(_ctx())
print("OUT:", _pp(out))
async def scenario_store_macros_two_runs():
print("\n=== MACROS 4: [[STORE:key]] и {{ STORE.key }} между запусками (clear_var_store=False) ===")
pid = "p_macros_4_store"
@@ -198,6 +176,7 @@ async def scenario_store_macros_two_runs():
out2 = await PipelineExecutor(p2).run(_ctx())
print("RUN2:", _pp(out2))
async def scenario_pm_prompt_blocks_to_provider_structs():
print("\n=== MACROS 5: Prompt Blocks ([[PROMPT]]) → provider-structures (OpenAI) ===")
# Verify that [[PROMPT]] with a list of blocks turns into "messages":[...]
@@ -232,6 +211,7 @@ async def scenario_pm_prompt_blocks_to_provider_structs():
out = await PipelineExecutor(p).run(_ctx())
print("OUT:", _pp(out))
def run_all():
async def main():
await scenario_bare_vars_and_braces()
@@ -242,5 +222,6 @@ def run_all():
print("\n=== MACROS VARS SUITE: DONE ===")
asyncio.run(main())
if __name__ == "__main__":
run_all()

View File

@@ -0,0 +1,249 @@
import asyncio
import json
from typing import Any, Dict, List
from agentui.pipeline.executor import PipelineExecutor
import agentui.providers.http_client as hc
from tests.utils import ctx as _ctx, pp as _pp
# Capture of all outbound ProviderCall HTTP requests (one per run)
CAPTURED: List[Dict[str, Any]] = []
class DummyResponse:
def __init__(self, status_code: int = 200, body: Dict[str, Any] | None = None):
self.status_code = status_code
self._json = body if body is not None else {"ok": True}
self.headers = {}
try:
self.content = json.dumps(self._json, ensure_ascii=False).encode("utf-8")
except Exception:
self.content = b"{}"
try:
self.text = json.dumps(self._json, ensure_ascii=False)
except Exception:
self.text = "{}"
def json(self) -> Any:
return self._json
class DummyClient:
def __init__(self, capture: List[Dict[str, Any]], status_code: int = 200):
self._capture = capture
self._status = status_code
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
return False
async def post(self, url: str, content: bytes, headers: Dict[str, str]):
try:
payload = json.loads(content.decode("utf-8"))
except Exception:
payload = {"_raw": content.decode("utf-8", errors="ignore")}
rec = {"url": url, "headers": headers, "payload": payload}
self._capture.append(rec)
# Echo payload back to keep extractor happy but not tied to vendor formats
return DummyResponse(self._status, {"echo": rec})
# RawForward may use .request, but we don't need it here
async def request(self, method: str, url: str, headers: Dict[str, str], content: bytes | None):
return await self.post(url, content or b"{}", headers)
def _patch_http_client():
"""Monkeypatch build_client used by ProviderCall to our dummy."""
hc.build_client = lambda timeout=60.0: DummyClient(CAPTURED, 200) # type: ignore[assignment]
# Also patch the symbol imported inside executor so ProviderCall uses the DummyClient
import agentui.pipeline.executor as ex # type: ignore
ex.build_client = lambda timeout=60.0: DummyClient(CAPTURED, 200) # type: ignore
def _mk_pipeline(provider: str, prompt_combine: str) -> Dict[str, Any]:
"""Build a minimal ProviderCall-only pipeline for a given provider and combine spec."""
provider = provider.lower().strip()
if provider not in {"openai", "gemini", "claude"}:
raise AssertionError(f"Unsupported provider in test: {provider}")
base_url = "http://mock.local"
if provider == "openai":
endpoint = "/v1/chat/completions"
template = '{ "model": "{{ model }}", [[PROMPT]] }'
elif provider == "gemini":
endpoint = "/v1beta/models/{{ model }}:generateContent"
template = '{ "model": "{{ model }}", [[PROMPT]] }'
else: # claude
endpoint = "/v1/messages"
template = '{ "model": "{{ model }}", [[PROMPT]] }'
p = {
"id": f"p_prompt_combine_{provider}",
"name": f"prompt_combine to {provider}",
"loop_mode": "dag",
"nodes": [
{
"id": "n1",
"type": "ProviderCall",
"config": {
"provider": provider,
"provider_configs": {
provider: {
"base_url": base_url,
"endpoint": endpoint,
"headers": "{}",
"template": template,
}
},
# Key under test:
"prompt_combine": prompt_combine,
# Prompt Blocks (PROMPT)
"blocks": [
{"id": "b1", "name": "sys", "role": "system", "prompt": "Ты — Narrator-chan.", "enabled": True, "order": 0},
{"id": "b2", "name": "user", "role": "user", "prompt": "как лела", "enabled": True, "order": 1},
],
},
"in": {},
}
],
}
return p
def _ctx_with_incoming(incoming_json: Dict[str, Any], vendor: str = "openai") -> Dict[str, Any]:
base = _ctx(vendor=vendor)
inc = dict(base["incoming"])
inc["json"] = incoming_json
base["incoming"] = inc
return base
async def scenario_openai_target_from_gemini_contents():
print("\n=== PROMPT_COMBINE 1: target=openai, incoming=gemini.contents & PROMPT ===")
_patch_http_client()
CAPTURED.clear()
# Incoming JSON in Gemini shape
incoming_json = {
"contents": [
{"role": "user", "parts": [{"text": "Прив"}]},
{"role": "model", "parts": [{"text": "И тебе привет!"}]},
]
}
p = _mk_pipeline("openai", "[[VAR:incoming.json.contents]] & [[PROMPT]]")
out = await PipelineExecutor(p).run(_ctx_with_incoming(incoming_json, vendor="gemini"))
print("PIPE OUT:", _pp(out))
assert CAPTURED, "No HTTP request captured"
req = CAPTURED[-1]
payload = req["payload"]
# Validate OpenAI body
assert "messages" in payload, "OpenAI payload must contain messages"
msgs = payload["messages"]
# Expected: 2 (converted Gemini) + 2 (PROMPT blocks system+user) = 4
assert isinstance(msgs, list) and len(msgs) == 4
roles = [m.get("role") for m in msgs]
# Gemini model -> OpenAI assistant
assert "assistant" in roles and "user" in roles
# PROMPT system+user present (system may not be first without @pos; we only check existence)
assert any(m.get("role") == "system" for m in msgs), "System message from PROMPT must be present"
async def scenario_gemini_target_from_openai_messages():
print("\n=== PROMPT_COMBINE 2: target=gemini, incoming=openai.messages & PROMPT ===")
_patch_http_client()
CAPTURED.clear()
incoming_json = {
"messages": [
{"role": "system", "content": "Системный-тест из входящего"},
{"role": "user", "content": "Its just me.."},
{"role": "assistant", "content": "Reply from model"},
]
}
p = _mk_pipeline("gemini", "[[VAR:incoming.json.messages]] & [[PROMPT]]")
out = await PipelineExecutor(p).run(_ctx_with_incoming(incoming_json, vendor="openai"))
print("PIPE OUT:", _pp(out))
assert CAPTURED, "No HTTP request captured"
payload = CAPTURED[-1]["payload"]
# Validate Gemini body
assert "contents" in payload, "Gemini payload must contain contents"
cnts = payload["contents"]
assert isinstance(cnts, list)
# PROMPT system goes to systemInstruction, user block goes to contents
assert "systemInstruction" in payload, "Gemini payload must contain systemInstruction when system text exists"
si = payload["systemInstruction"]
# SystemInstruction.parts[].text must include both incoming system and PROMPT system merged
si_texts = []
try:
for prt in si.get("parts", []):
t = prt.get("text")
if isinstance(t, str) and t.strip():
si_texts.append(t.strip())
except Exception:
pass
joined = "\n".join(si_texts)
assert "Системный-тест из входящего" in joined, "Incoming system must be merged into systemInstruction"
assert "Narrator-chan" in joined, "PROMPT system must be merged into systemInstruction"
async def scenario_claude_target_from_openai_messages():
print("\n=== PROMPT_COMBINE 3: target=claude, incoming=openai.messages & PROMPT ===")
_patch_http_client()
CAPTURED.clear()
incoming_json = {
"messages": [
{"role": "system", "content": "Системный-тест CLAUDE"},
{"role": "user", "content": "Прив"},
{"role": "assistant", "content": "Привет!"},
]
}
p = _mk_pipeline("claude", "[[VAR:incoming.json.messages]] & [[PROMPT]]")
out = await PipelineExecutor(p).run(_ctx_with_incoming(incoming_json, vendor="openai"))
print("PIPE OUT:", _pp(out))
assert CAPTURED, "No HTTP request captured"
payload = CAPTURED[-1]["payload"]
# Validate Claude body
assert "messages" in payload, "Claude payload must contain messages"
assert "system" in payload, "Claude payload must contain system blocks"
sys_blocks = payload["system"]
# system must be an array of blocks with type == "text"
assert isinstance(sys_blocks, list) and any(isinstance(b, dict) and b.get("type") == "text" for b in sys_blocks)
sys_text_join = "\n".join([b.get("text") for b in sys_blocks if isinstance(b, dict) and isinstance(b.get("text"), str)])
assert "Системный-тест CLAUDE" in sys_text_join, "Incoming system should be present"
assert "Narrator-chan" in sys_text_join, "PROMPT system should be present"
async def scenario_prepend_positioning_openai():
print("\n=== PROMPT_COMBINE 4: target=openai, PROMPT@pos=prepend & incoming.contents ===")
_patch_http_client()
CAPTURED.clear()
incoming_json = {
"contents": [
{"role": "user", "parts": [{"text": "A"}]},
{"role": "model", "parts": [{"text": "B"}]},
]
}
# Put PROMPT first; ensure system message becomes first in messages
p = _mk_pipeline("openai", "[[PROMPT]]@pos=prepend & [[VAR:incoming.json.contents]]")
out = await PipelineExecutor(p).run(_ctx_with_incoming(incoming_json, vendor="gemini"))
print("PIPE OUT:", _pp(out))
assert CAPTURED, "No HTTP request captured"
payload = CAPTURED[-1]["payload"]
msgs = payload.get("messages", [])
assert isinstance(msgs, list) and len(msgs) >= 2
first = msgs[0]
# Expect first to be system (from PROMPT) due to prepend
assert first.get("role") == "system", f"Expected system as first message, got {first}"
def test_prompt_combine_all():
async def main():
await scenario_openai_target_from_gemini_contents()
await scenario_gemini_target_from_openai_messages()
await scenario_claude_target_from_openai_messages()
await scenario_prepend_positioning_openai()
print("\n=== PROMPT_COMBINE: DONE ===")
asyncio.run(main())

View File

@@ -0,0 +1,23 @@
# Pytest wrapper for existing scenarios that run themselves via run_all()/run_checks().
# Lets all tests be run with a single command: python -m pytest -q
# The original files stay untouched; we just call their public functions from pytest tests.
def test_executor_iterative():
# tests/test_executor_iterative.py provides run_checks() (it calls asyncio.run itself)
from tests.test_executor_iterative import run_checks
run_checks()
def test_edge_cases():
# tests/test_edge_cases.py provides run_all() (it calls asyncio.run itself)
from tests.test_edge_cases import run_all
run_all()
def test_macros_and_vars():
# tests/test_macros_vars.py provides run_all() (it calls asyncio.run itself)
from tests.test_macros_vars import run_all
run_all()
def test_while_nodes():
# our new scenario set; it provides run_all() with its own asyncio.run
from tests.test_while_nodes import run_all
run_all()

tests/test_while_nodes.py Normal file
View File

@@ -0,0 +1,134 @@
import asyncio
from agentui.pipeline.executor import PipelineExecutor
from tests.utils import ctx as _ctx
async def scenario_providercall_while_ignore():
# ProviderCall with while loop and ignore_errors enabled.
# No base_url is provided to force ExecutionError inside node.run();
# the wrapper catches it and exposes {"error": "..."} plus vars.
p = {
"id": "p_pc_while_ignore",
"name": "ProviderCall while+ignore",
"loop_mode": "dag",
"nodes": [
{
"id": "n2",
"type": "ProviderCall",
"config": {
"provider": "openai",
# while: 3 iterations (0,1,2)
"while_expr": "cycleindex < 3",
"while_max_iters": 10,
"ignore_errors": True,
# no base_url / provider_configs to trigger error safely
},
"in": {}
}
]
}
out = await PipelineExecutor(p).run(_ctx())
assert isinstance(out, dict)
# The wrapper returns the final out; the executor also merges its .vars into the STORE, but we assert on the node's own out.
vars_map = out.get("vars") or {}
assert isinstance(vars_map, dict)
# Final iteration index should be 2
assert vars_map.get("WAS_ERROR__n2") is True
assert vars_map.get("CYCLEINDEX__n2") == 2
async def scenario_rawforward_while_ignore():
# RawForward with while loop and ignore_errors enabled.
# No base_url, and incoming.json is a plain string -> detect_vendor=unknown -> ExecutionError;
# the wrapper catches it and returns {"error": "..."} with vars set.
p = {
"id": "p_rf_while_ignore",
"name": "RawForward while+ignore",
"loop_mode": "dag",
"nodes": [
{
"id": "n1",
"type": "RawForward",
"config": {
"while_expr": "cycleindex < 2",
"while_max_iters": 10,
"ignore_errors": True,
# no base_url; vendor detect will fail on plain text
},
"in": {}
}
]
}
ctx = _ctx()
# Provide incoming as plain text-like JSON so detect_vendor returns unknown
ctx["incoming"] = {
"method": "POST",
"url": "http://example.local/test",
"path": "/test",
"query": "",
"headers": {"content-type": "text/plain"},
"json": "raw-plain-body-simulated"
}
out = await PipelineExecutor(p).run(ctx)
assert isinstance(out, dict)
vars_map = out.get("vars") or {}
assert isinstance(vars_map, dict)
# Two iterations run (0 and 1), so the final index should be 1
assert vars_map.get("WAS_ERROR__n1") is True
assert vars_map.get("CYCLEINDEX__n1") == 1
async def scenario_providercall_while_with_out_macro():
# SetVars -> ProviderCall while uses OUT from n1 in expression
# Expression: ([[OUT:n1.vars.MSG]] contains "123") && (cycleindex < 2)
# Ignore errors to bypass real HTTP
p = {
"id": "p_pc_while_out_macro",
"name": "ProviderCall while with OUT macro",
"loop_mode": "iterative",
"nodes": [
{
"id": "n1",
"type": "SetVars",
"config": {
"variables": [
{"id": "v1", "name": "MSG", "mode": "string", "value": "abc123xyz"}
]
},
"in": {}
},
{
"id": "n2",
"type": "ProviderCall",
"config": {
"provider": "openai",
"while_expr": "([[OUT:n1.vars.MSG]] contains \"123\") && (cycleindex < 2)",
"while_max_iters": 10,
"ignore_errors": True
},
"in": {
"depends": "n1.done"
}
}
]
}
out = await PipelineExecutor(p).run(_ctx())
assert isinstance(out, dict)
vars_map = out.get("vars") or {}
assert isinstance(vars_map, dict)
# Since MSG contains "123" and cycleindex < 2, two iterations run (0 and 1)
assert vars_map.get("WAS_ERROR__n2") is True
assert vars_map.get("CYCLEINDEX__n2") == 1
def run_all():
async def main():
await scenario_providercall_while_ignore()
await scenario_rawforward_while_ignore()
await scenario_providercall_while_with_out_macro()
print("\n=== WHILE_NODES: DONE ===")
asyncio.run(main())
if __name__ == "__main__":
run_all()

tests/utils.py Normal file
View File

@@ -0,0 +1,52 @@
from __future__ import annotations
import json
from typing import Any, Dict, Optional
def pp(obj: Any, max_len: int = 800) -> str:
"""
Pretty-print JSON-like objects in tests with length guard.
"""
try:
s = json.dumps(obj, ensure_ascii=False, indent=2)
except Exception:
s = str(obj)
if len(s) > max_len:
return s[:max_len] + "...<truncated>"
return s
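# Example (hypothetical values): pp({"msg": "x" * 1000}, max_len=80) returns the
# first 80 characters of the pretty-printed JSON followed by "...<truncated>".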
def base_ctx(vendor: str = "openai") -> Dict[str, Any]:
"""
Base context used by edge-case tests (mirrors previous _base_ctx).
"""
return {
"model": "gpt-x",
"vendor_format": vendor,
"params": {"temperature": 0.1},
"chat": {"last_user": "hi"},
"OUT": {},
}
def ctx(vendor: str = "openai", incoming: Optional[Dict[str, Any]] = None, params: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
"""
General context used by macros/vars tests (mirrors previous _ctx).
"""
return {
"model": "gpt-x",
"vendor_format": vendor,
"params": params or {"temperature": 0.25},
"chat": {"last_user": "Привет"},
"OUT": {},
"incoming": incoming
or {
"method": "POST",
"url": "http://localhost/test",
"path": "/test",
"query": "",
"headers": {"x": "X-HEADER"},
"json": {},
},
}
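# Typical usage in a scenario (hypothetical override shown):
#   base = ctx(vendor="gemini")
#   base["incoming"] = dict(base["incoming"], json={"contents": []})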