feat: Parallel Execution of Nodes in Workflows (#8192)
Co-authored-by: StyleZhang <jasonapring2015@outlook.com>
Co-authored-by: Yi <yxiaoisme@gmail.com>
Co-authored-by: -LAN- <laipz8200@outlook.com>
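The refactored tests below build nodes through the new graph engine (Graph.init, GraphInitParams, GraphRuntimeState) and call node._run() instead of node.run(pool), and the new test_graph.py checks how fan-out edges from one source are grouped into parallel branches. As orientation, here is a minimal sketch of that grouping behaviour; it is a reduced two-branch variant of test_parallels_graph from this commit, not a test that ships in it, and it assumes Graph.init groups a multi-edge fan-out into a single parallel block exactly as the three-branch tests below assert.

from core.workflow.graph_engine.entities.graph import Graph

# "start" fans out to two llm nodes; both branches join at one answer node
graph_config = {
    "edges": [
        {"id": "start-source-llm1-target", "source": "start", "target": "llm1"},
        {"id": "start-source-llm2-target", "source": "start", "target": "llm2"},
        {"id": "llm1-source-answer-target", "source": "llm1", "target": "answer"},
        {"id": "llm2-source-answer-target", "source": "llm2", "target": "answer"},
    ],
    "nodes": [
        {"data": {"type": "start"}, "id": "start"},
        {"data": {"type": "llm"}, "id": "llm1"},
        {"data": {"type": "llm"}, "id": "llm2"},
        {"data": {"type": "answer", "title": "answer", "answer": "1"}, "id": "answer"},
    ],
}

graph = Graph.init(graph_config=graph_config)

# expected per the parallel tests in test_graph.py below:
# one parallel block containing both llm branches
assert len(graph.parallel_mapping) == 1
assert {"llm1", "llm2"} <= set(graph.node_parallel_mapping)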
@@ -1,17 +1,72 @@
import time
import uuid
from os import getenv
from typing import cast

import pytest

from core.app.entities.app_invoke_entities import InvokeFrom
from core.workflow.entities.node_entities import NodeRunResult, UserFrom
from core.workflow.entities.variable_pool import VariablePool
from core.workflow.nodes.base_node import UserFrom
from core.workflow.enums import SystemVariableKey
from core.workflow.graph_engine.entities.graph import Graph
from core.workflow.graph_engine.entities.graph_init_params import GraphInitParams
from core.workflow.graph_engine.entities.graph_runtime_state import GraphRuntimeState
from core.workflow.nodes.code.code_node import CodeNode
from models.workflow import WorkflowNodeExecutionStatus
from core.workflow.nodes.code.entities import CodeNodeData
from models.workflow import WorkflowNodeExecutionStatus, WorkflowType
from tests.integration_tests.workflow.nodes.__mock.code_executor import setup_code_executor_mock

CODE_MAX_STRING_LENGTH = int(getenv("CODE_MAX_STRING_LENGTH", "10000"))


def init_code_node(code_config: dict):
    graph_config = {
        "edges": [
            {
                "id": "start-source-code-target",
                "source": "start",
                "target": "code",
            },
        ],
        "nodes": [{"data": {"type": "start"}, "id": "start"}, code_config],
    }

    graph = Graph.init(graph_config=graph_config)

    init_params = GraphInitParams(
        tenant_id="1",
        app_id="1",
        workflow_type=WorkflowType.WORKFLOW,
        workflow_id="1",
        graph_config=graph_config,
        user_id="1",
        user_from=UserFrom.ACCOUNT,
        invoke_from=InvokeFrom.DEBUGGER,
        call_depth=0,
    )

    # construct variable pool
    variable_pool = VariablePool(
        system_variables={SystemVariableKey.FILES: [], SystemVariableKey.USER_ID: "aaa"},
        user_inputs={},
        environment_variables=[],
        conversation_variables=[],
    )
    variable_pool.add(["code", "123", "args1"], 1)
    variable_pool.add(["code", "123", "args2"], 2)

    node = CodeNode(
        id=str(uuid.uuid4()),
        graph_init_params=init_params,
        graph=graph,
        graph_runtime_state=GraphRuntimeState(variable_pool=variable_pool, start_at=time.perf_counter()),
        config=code_config,
    )

    return node


@pytest.mark.parametrize("setup_code_executor_mock", [["none"]], indirect=True)
def test_execute_code(setup_code_executor_mock):
    code = """
@@ -22,44 +77,36 @@ def test_execute_code(setup_code_executor_mock):
    """
    # trim first 4 spaces at the beginning of each line
    code = "\n".join([line[4:] for line in code.split("\n")])
|
||||
node = CodeNode(
|
||||
tenant_id="1",
|
||||
app_id="1",
|
||||
workflow_id="1",
|
||||
user_id="1",
|
||||
user_from=UserFrom.ACCOUNT,
|
||||
invoke_from=InvokeFrom.WEB_APP,
|
||||
config={
|
||||
"id": "1",
|
||||
"data": {
|
||||
"outputs": {
|
||||
"result": {
|
||||
"type": "number",
|
||||
},
|
||||
},
|
||||
"title": "123",
|
||||
"variables": [
|
||||
{
|
||||
"variable": "args1",
|
||||
"value_selector": ["1", "123", "args1"],
|
||||
},
|
||||
{"variable": "args2", "value_selector": ["1", "123", "args2"]},
|
||||
],
|
||||
"answer": "123",
|
||||
"code_language": "python3",
|
||||
"code": code,
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
# construct variable pool
|
||||
pool = VariablePool(system_variables={}, user_inputs={}, environment_variables=[])
|
||||
pool.add(["1", "123", "args1"], 1)
|
||||
pool.add(["1", "123", "args2"], 2)
|
||||
code_config = {
|
||||
"id": "code",
|
||||
"data": {
|
||||
"outputs": {
|
||||
"result": {
|
||||
"type": "number",
|
||||
},
|
||||
},
|
||||
"title": "123",
|
||||
"variables": [
|
||||
{
|
||||
"variable": "args1",
|
||||
"value_selector": ["1", "123", "args1"],
|
||||
},
|
||||
{"variable": "args2", "value_selector": ["1", "123", "args2"]},
|
||||
],
|
||||
"answer": "123",
|
||||
"code_language": "python3",
|
||||
"code": code,
|
||||
},
|
||||
}
|
||||
|
||||
node = init_code_node(code_config)
|
||||
|
||||
# execute node
|
||||
result = node.run(pool)
|
||||
result = node._run()
|
||||
assert isinstance(result, NodeRunResult)
|
||||
assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
|
||||
assert result.outputs is not None
|
||||
assert result.outputs["result"] == 3
|
||||
assert result.error is None
|
||||
|
||||
@@ -74,44 +121,34 @@ def test_execute_code_output_validator(setup_code_executor_mock):
|
||||
"""
|
||||
# trim first 4 spaces at the beginning of each line
|
||||
code = "\n".join([line[4:] for line in code.split("\n")])
|
||||
node = CodeNode(
|
||||
tenant_id="1",
|
||||
app_id="1",
|
||||
workflow_id="1",
|
||||
user_id="1",
|
||||
user_from=UserFrom.ACCOUNT,
|
||||
invoke_from=InvokeFrom.WEB_APP,
|
||||
config={
|
||||
"id": "1",
|
||||
"data": {
|
||||
"outputs": {
|
||||
"result": {
|
||||
"type": "string",
|
||||
},
|
||||
},
|
||||
"title": "123",
|
||||
"variables": [
|
||||
{
|
||||
"variable": "args1",
|
||||
"value_selector": ["1", "123", "args1"],
|
||||
},
|
||||
{"variable": "args2", "value_selector": ["1", "123", "args2"]},
|
||||
],
|
||||
"answer": "123",
|
||||
"code_language": "python3",
|
||||
"code": code,
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
# construct variable pool
|
||||
pool = VariablePool(system_variables={}, user_inputs={}, environment_variables=[])
|
||||
pool.add(["1", "123", "args1"], 1)
|
||||
pool.add(["1", "123", "args2"], 2)
|
||||
code_config = {
|
||||
"id": "code",
|
||||
"data": {
|
||||
"outputs": {
|
||||
"result": {
|
||||
"type": "string",
|
||||
},
|
||||
},
|
||||
"title": "123",
|
||||
"variables": [
|
||||
{
|
||||
"variable": "args1",
|
||||
"value_selector": ["1", "123", "args1"],
|
||||
},
|
||||
{"variable": "args2", "value_selector": ["1", "123", "args2"]},
|
||||
],
|
||||
"answer": "123",
|
||||
"code_language": "python3",
|
||||
"code": code,
|
||||
},
|
||||
}
|
||||
|
||||
node = init_code_node(code_config)
|
||||
|
||||
# execute node
|
||||
result = node.run(pool)
|
||||
|
||||
result = node._run()
|
||||
assert isinstance(result, NodeRunResult)
|
||||
assert result.status == WorkflowNodeExecutionStatus.FAILED
|
||||
assert result.error == "Output variable `result` must be a string"
|
||||
|
||||
@@ -127,65 +164,60 @@ def test_execute_code_output_validator_depth():
|
||||
"""
|
||||
# trim first 4 spaces at the beginning of each line
|
||||
code = "\n".join([line[4:] for line in code.split("\n")])
|
||||
node = CodeNode(
|
||||
tenant_id="1",
|
||||
app_id="1",
|
||||
workflow_id="1",
|
||||
user_id="1",
|
||||
user_from=UserFrom.ACCOUNT,
|
||||
invoke_from=InvokeFrom.WEB_APP,
|
||||
config={
|
||||
"id": "1",
|
||||
"data": {
|
||||
"outputs": {
|
||||
"string_validator": {
|
||||
"type": "string",
|
||||
},
|
||||
"number_validator": {
|
||||
"type": "number",
|
||||
},
|
||||
"number_array_validator": {
|
||||
"type": "array[number]",
|
||||
},
|
||||
"string_array_validator": {
|
||||
"type": "array[string]",
|
||||
},
|
||||
"object_validator": {
|
||||
"type": "object",
|
||||
"children": {
|
||||
"result": {
|
||||
"type": "number",
|
||||
},
|
||||
"depth": {
|
||||
"type": "object",
|
||||
"children": {
|
||||
"depth": {
|
||||
"type": "object",
|
||||
"children": {
|
||||
"depth": {
|
||||
"type": "number",
|
||||
}
|
||||
},
|
||||
}
|
||||
},
|
||||
|
||||
code_config = {
|
||||
"id": "code",
|
||||
"data": {
|
||||
"outputs": {
|
||||
"string_validator": {
|
||||
"type": "string",
|
||||
},
|
||||
"number_validator": {
|
||||
"type": "number",
|
||||
},
|
||||
"number_array_validator": {
|
||||
"type": "array[number]",
|
||||
},
|
||||
"string_array_validator": {
|
||||
"type": "array[string]",
|
||||
},
|
||||
"object_validator": {
|
||||
"type": "object",
|
||||
"children": {
|
||||
"result": {
|
||||
"type": "number",
|
||||
},
|
||||
"depth": {
|
||||
"type": "object",
|
||||
"children": {
|
||||
"depth": {
|
||||
"type": "object",
|
||||
"children": {
|
||||
"depth": {
|
||||
"type": "number",
|
||||
}
|
||||
},
|
||||
}
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"title": "123",
|
||||
"variables": [
|
||||
{
|
||||
"variable": "args1",
|
||||
"value_selector": ["1", "123", "args1"],
|
||||
},
|
||||
{"variable": "args2", "value_selector": ["1", "123", "args2"]},
|
||||
],
|
||||
"answer": "123",
|
||||
"code_language": "python3",
|
||||
"code": code,
|
||||
},
|
||||
"title": "123",
|
||||
"variables": [
|
||||
{
|
||||
"variable": "args1",
|
||||
"value_selector": ["1", "123", "args1"],
|
||||
},
|
||||
{"variable": "args2", "value_selector": ["1", "123", "args2"]},
|
||||
],
|
||||
"answer": "123",
|
||||
"code_language": "python3",
|
||||
"code": code,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
node = init_code_node(code_config)
|
||||
|
||||
# construct result
|
||||
result = {
|
||||
@@ -196,6 +228,8 @@ def test_execute_code_output_validator_depth():
|
||||
"object_validator": {"result": 1, "depth": {"depth": {"depth": 1}}},
|
||||
}
|
||||
|
||||
node.node_data = cast(CodeNodeData, node.node_data)
|
||||
|
||||
# validate
|
||||
node._transform_result(result, node.node_data.outputs)
|
||||
|
||||
@@ -250,35 +284,30 @@ def test_execute_code_output_object_list():
|
||||
"""
|
||||
# trim first 4 spaces at the beginning of each line
|
||||
code = "\n".join([line[4:] for line in code.split("\n")])
|
||||
node = CodeNode(
|
||||
tenant_id="1",
|
||||
app_id="1",
|
||||
workflow_id="1",
|
||||
user_id="1",
|
||||
invoke_from=InvokeFrom.WEB_APP,
|
||||
user_from=UserFrom.ACCOUNT,
|
||||
config={
|
||||
"id": "1",
|
||||
"data": {
|
||||
"outputs": {
|
||||
"object_list": {
|
||||
"type": "array[object]",
|
||||
},
|
||||
|
||||
code_config = {
|
||||
"id": "code",
|
||||
"data": {
|
||||
"outputs": {
|
||||
"object_list": {
|
||||
"type": "array[object]",
|
||||
},
|
||||
"title": "123",
|
||||
"variables": [
|
||||
{
|
||||
"variable": "args1",
|
||||
"value_selector": ["1", "123", "args1"],
|
||||
},
|
||||
{"variable": "args2", "value_selector": ["1", "123", "args2"]},
|
||||
],
|
||||
"answer": "123",
|
||||
"code_language": "python3",
|
||||
"code": code,
|
||||
},
|
||||
"title": "123",
|
||||
"variables": [
|
||||
{
|
||||
"variable": "args1",
|
||||
"value_selector": ["1", "123", "args1"],
|
||||
},
|
||||
{"variable": "args2", "value_selector": ["1", "123", "args2"]},
|
||||
],
|
||||
"answer": "123",
|
||||
"code_language": "python3",
|
||||
"code": code,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
node = init_code_node(code_config)
|
||||
|
||||
# construct result
|
||||
result = {
|
||||
@@ -295,6 +324,8 @@ def test_execute_code_output_object_list():
|
||||
]
|
||||
}
|
||||
|
||||
node.node_data = cast(CodeNodeData, node.node_data)
|
||||
|
||||
# validate
|
||||
node._transform_result(result, node.node_data.outputs)
|
||||
|
||||
|
||||
@@ -1,31 +1,69 @@
|
||||
import time
|
||||
import uuid
|
||||
from urllib.parse import urlencode
|
||||
|
||||
import pytest
|
||||
|
||||
from core.app.entities.app_invoke_entities import InvokeFrom
|
||||
from core.workflow.entities.node_entities import UserFrom
|
||||
from core.workflow.entities.variable_pool import VariablePool
|
||||
from core.workflow.nodes.base_node import UserFrom
|
||||
from core.workflow.enums import SystemVariableKey
|
||||
from core.workflow.graph_engine.entities.graph import Graph
|
||||
from core.workflow.graph_engine.entities.graph_init_params import GraphInitParams
|
||||
from core.workflow.graph_engine.entities.graph_runtime_state import GraphRuntimeState
|
||||
from core.workflow.nodes.http_request.http_request_node import HttpRequestNode
|
||||
from models.workflow import WorkflowType
|
||||
from tests.integration_tests.workflow.nodes.__mock.http import setup_http_mock
|
||||
|
||||
BASIC_NODE_DATA = {
|
||||
"tenant_id": "1",
|
||||
"app_id": "1",
|
||||
"workflow_id": "1",
|
||||
"user_id": "1",
|
||||
"user_from": UserFrom.ACCOUNT,
|
||||
"invoke_from": InvokeFrom.WEB_APP,
|
||||
}
|
||||
|
||||
# construct variable pool
|
||||
pool = VariablePool(system_variables={}, user_inputs={}, environment_variables=[])
|
||||
pool.add(["a", "b123", "args1"], 1)
|
||||
pool.add(["a", "b123", "args2"], 2)
|
||||
def init_http_node(config: dict):
|
||||
graph_config = {
|
||||
"edges": [
|
||||
{
|
||||
"id": "start-source-next-target",
|
||||
"source": "start",
|
||||
"target": "1",
|
||||
},
|
||||
],
|
||||
"nodes": [{"data": {"type": "start"}, "id": "start"}, config],
|
||||
}
|
||||
|
||||
graph = Graph.init(graph_config=graph_config)
|
||||
|
||||
init_params = GraphInitParams(
|
||||
tenant_id="1",
|
||||
app_id="1",
|
||||
workflow_type=WorkflowType.WORKFLOW,
|
||||
workflow_id="1",
|
||||
graph_config=graph_config,
|
||||
user_id="1",
|
||||
user_from=UserFrom.ACCOUNT,
|
||||
invoke_from=InvokeFrom.DEBUGGER,
|
||||
call_depth=0,
|
||||
)
|
||||
|
||||
# construct variable pool
|
||||
variable_pool = VariablePool(
|
||||
system_variables={SystemVariableKey.FILES: [], SystemVariableKey.USER_ID: "aaa"},
|
||||
user_inputs={},
|
||||
environment_variables=[],
|
||||
conversation_variables=[],
|
||||
)
|
||||
variable_pool.add(["a", "b123", "args1"], 1)
|
||||
variable_pool.add(["a", "b123", "args2"], 2)
|
||||
|
||||
return HttpRequestNode(
|
||||
id=str(uuid.uuid4()),
|
||||
graph_init_params=init_params,
|
||||
graph=graph,
|
||||
graph_runtime_state=GraphRuntimeState(variable_pool=variable_pool, start_at=time.perf_counter()),
|
||||
config=config,
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("setup_http_mock", [["none"]], indirect=True)
|
||||
def test_get(setup_http_mock):
|
||||
node = HttpRequestNode(
|
||||
node = init_http_node(
|
||||
config={
|
||||
"id": "1",
|
||||
"data": {
|
||||
@@ -45,12 +83,11 @@ def test_get(setup_http_mock):
|
||||
"params": "A:b",
|
||||
"body": None,
|
||||
},
|
||||
},
|
||||
**BASIC_NODE_DATA,
|
||||
}
|
||||
)
|
||||
|
||||
result = node.run(pool)
|
||||
|
||||
result = node._run()
|
||||
assert result.process_data is not None
|
||||
data = result.process_data.get("request", "")
|
||||
|
||||
assert "?A=b" in data
|
||||
@@ -59,7 +96,7 @@ def test_get(setup_http_mock):
|
||||
|
||||
@pytest.mark.parametrize("setup_http_mock", [["none"]], indirect=True)
|
||||
def test_no_auth(setup_http_mock):
|
||||
node = HttpRequestNode(
|
||||
node = init_http_node(
|
||||
config={
|
||||
"id": "1",
|
||||
"data": {
|
||||
@@ -75,12 +112,11 @@ def test_no_auth(setup_http_mock):
|
||||
"params": "A:b",
|
||||
"body": None,
|
||||
},
|
||||
},
|
||||
**BASIC_NODE_DATA,
|
||||
}
|
||||
)
|
||||
|
||||
result = node.run(pool)
|
||||
|
||||
result = node._run()
|
||||
assert result.process_data is not None
|
||||
data = result.process_data.get("request", "")
|
||||
|
||||
assert "?A=b" in data
|
||||
@@ -89,7 +125,7 @@ def test_no_auth(setup_http_mock):
|
||||
|
||||
@pytest.mark.parametrize("setup_http_mock", [["none"]], indirect=True)
|
||||
def test_custom_authorization_header(setup_http_mock):
|
||||
node = HttpRequestNode(
|
||||
node = init_http_node(
|
||||
config={
|
||||
"id": "1",
|
||||
"data": {
|
||||
@@ -109,12 +145,11 @@ def test_custom_authorization_header(setup_http_mock):
|
||||
"params": "A:b",
|
||||
"body": None,
|
||||
},
|
||||
},
|
||||
**BASIC_NODE_DATA,
|
||||
}
|
||||
)
|
||||
|
||||
result = node.run(pool)
|
||||
|
||||
result = node._run()
|
||||
assert result.process_data is not None
|
||||
data = result.process_data.get("request", "")
|
||||
|
||||
assert "?A=b" in data
|
||||
@@ -123,7 +158,7 @@ def test_custom_authorization_header(setup_http_mock):
|
||||
|
||||
@pytest.mark.parametrize("setup_http_mock", [["none"]], indirect=True)
|
||||
def test_template(setup_http_mock):
|
||||
node = HttpRequestNode(
|
||||
node = init_http_node(
|
||||
config={
|
||||
"id": "1",
|
||||
"data": {
|
||||
@@ -143,11 +178,11 @@ def test_template(setup_http_mock):
|
||||
"params": "A:b\nTemplate:{{#a.b123.args2#}}",
|
||||
"body": None,
|
||||
},
|
||||
},
|
||||
**BASIC_NODE_DATA,
|
||||
}
|
||||
)
|
||||
|
||||
result = node.run(pool)
|
||||
result = node._run()
|
||||
assert result.process_data is not None
|
||||
data = result.process_data.get("request", "")
|
||||
|
||||
assert "?A=b" in data
|
||||
@@ -158,7 +193,7 @@ def test_template(setup_http_mock):
|
||||
|
||||
@pytest.mark.parametrize("setup_http_mock", [["none"]], indirect=True)
|
||||
def test_json(setup_http_mock):
|
||||
node = HttpRequestNode(
|
||||
node = init_http_node(
|
||||
config={
|
||||
"id": "1",
|
||||
"data": {
|
||||
@@ -178,11 +213,11 @@ def test_json(setup_http_mock):
|
||||
"params": "A:b",
|
||||
"body": {"type": "json", "data": '{"a": "{{#a.b123.args1#}}"}'},
|
||||
},
|
||||
},
|
||||
**BASIC_NODE_DATA,
|
||||
}
|
||||
)
|
||||
|
||||
result = node.run(pool)
|
||||
result = node._run()
|
||||
assert result.process_data is not None
|
||||
data = result.process_data.get("request", "")
|
||||
|
||||
assert '{"a": "1"}' in data
|
||||
@@ -190,7 +225,7 @@ def test_json(setup_http_mock):
|
||||
|
||||
|
||||
def test_x_www_form_urlencoded(setup_http_mock):
|
||||
node = HttpRequestNode(
|
||||
node = init_http_node(
|
||||
config={
|
||||
"id": "1",
|
||||
"data": {
|
||||
@@ -210,11 +245,11 @@ def test_x_www_form_urlencoded(setup_http_mock):
|
||||
"params": "A:b",
|
||||
"body": {"type": "x-www-form-urlencoded", "data": "a:{{#a.b123.args1#}}\nb:{{#a.b123.args2#}}"},
|
||||
},
|
||||
},
|
||||
**BASIC_NODE_DATA,
|
||||
}
|
||||
)
|
||||
|
||||
result = node.run(pool)
|
||||
result = node._run()
|
||||
assert result.process_data is not None
|
||||
data = result.process_data.get("request", "")
|
||||
|
||||
assert "a=1&b=2" in data
|
||||
@@ -222,7 +257,7 @@ def test_x_www_form_urlencoded(setup_http_mock):
|
||||
|
||||
|
||||
def test_form_data(setup_http_mock):
|
||||
node = HttpRequestNode(
|
||||
node = init_http_node(
|
||||
config={
|
||||
"id": "1",
|
||||
"data": {
|
||||
@@ -242,11 +277,11 @@ def test_form_data(setup_http_mock):
|
||||
"params": "A:b",
|
||||
"body": {"type": "form-data", "data": "a:{{#a.b123.args1#}}\nb:{{#a.b123.args2#}}"},
|
||||
},
|
||||
},
|
||||
**BASIC_NODE_DATA,
|
||||
}
|
||||
)
|
||||
|
||||
result = node.run(pool)
|
||||
result = node._run()
|
||||
assert result.process_data is not None
|
||||
data = result.process_data.get("request", "")
|
||||
|
||||
assert 'form-data; name="a"' in data
|
||||
@@ -257,7 +292,7 @@ def test_form_data(setup_http_mock):
|
||||
|
||||
|
||||
def test_none_data(setup_http_mock):
|
||||
node = HttpRequestNode(
|
||||
node = init_http_node(
|
||||
config={
|
||||
"id": "1",
|
||||
"data": {
|
||||
@@ -277,11 +312,11 @@ def test_none_data(setup_http_mock):
|
||||
"params": "A:b",
|
||||
"body": {"type": "none", "data": "123123123"},
|
||||
},
|
||||
},
|
||||
**BASIC_NODE_DATA,
|
||||
}
|
||||
)
|
||||
|
||||
result = node.run(pool)
|
||||
result = node._run()
|
||||
assert result.process_data is not None
|
||||
data = result.process_data.get("request", "")
|
||||
|
||||
assert "X-Header: 123" in data
|
||||
@@ -289,7 +324,7 @@ def test_none_data(setup_http_mock):
|
||||
|
||||
|
||||
def test_mock_404(setup_http_mock):
|
||||
node = HttpRequestNode(
|
||||
node = init_http_node(
|
||||
config={
|
||||
"id": "1",
|
||||
"data": {
|
||||
@@ -305,19 +340,19 @@ def test_mock_404(setup_http_mock):
|
||||
"params": "",
|
||||
"headers": "X-Header:123",
|
||||
},
|
||||
},
|
||||
**BASIC_NODE_DATA,
|
||||
}
|
||||
)
|
||||
|
||||
result = node.run(pool)
|
||||
result = node._run()
|
||||
assert result.outputs is not None
|
||||
resp = result.outputs
|
||||
|
||||
assert 404 == resp.get("status_code")
|
||||
assert "Not Found" in resp.get("body")
|
||||
assert "Not Found" in resp.get("body", "")
|
||||
|
||||
|
||||
def test_multi_colons_parse(setup_http_mock):
|
||||
node = HttpRequestNode(
|
||||
node = init_http_node(
|
||||
config={
|
||||
"id": "1",
|
||||
"data": {
|
||||
@@ -333,13 +368,14 @@ def test_multi_colons_parse(setup_http_mock):
|
||||
"headers": "Referer:http://example3.com\nRedirect:http://example4.com",
|
||||
"body": {"type": "form-data", "data": "Referer:http://example5.com\nRedirect:http://example6.com"},
|
||||
},
|
||||
},
|
||||
**BASIC_NODE_DATA,
|
||||
}
|
||||
)
|
||||
|
||||
result = node.run(pool)
|
||||
result = node._run()
|
||||
assert result.process_data is not None
|
||||
assert result.outputs is not None
|
||||
resp = result.outputs
|
||||
|
||||
assert urlencode({"Redirect": "http://example2.com"}) in result.process_data.get("request")
|
||||
assert 'form-data; name="Redirect"\n\nhttp://example6.com' in result.process_data.get("request")
|
||||
assert "http://example3.com" == resp.get("headers").get("referer")
|
||||
assert urlencode({"Redirect": "http://example2.com"}) in result.process_data.get("request", "")
|
||||
assert 'form-data; name="Redirect"\n\nhttp://example6.com' in result.process_data.get("request", "")
|
||||
assert "http://example3.com" == resp.get("headers", {}).get("referer")
|
||||
|
||||
@@ -1,5 +1,8 @@
|
||||
import json
|
||||
import os
|
||||
import time
|
||||
import uuid
|
||||
from collections.abc import Generator
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
import pytest
|
||||
@@ -10,28 +13,77 @@ from core.entities.provider_entities import CustomConfiguration, CustomProviderC
|
||||
from core.model_manager import ModelInstance
|
||||
from core.model_runtime.entities.model_entities import ModelType
|
||||
from core.model_runtime.model_providers import ModelProviderFactory
|
||||
from core.workflow.entities.node_entities import UserFrom
|
||||
from core.workflow.entities.variable_pool import VariablePool
|
||||
from core.workflow.enums import SystemVariableKey
|
||||
from core.workflow.nodes.base_node import UserFrom
|
||||
from core.workflow.graph_engine.entities.graph import Graph
|
||||
from core.workflow.graph_engine.entities.graph_init_params import GraphInitParams
|
||||
from core.workflow.graph_engine.entities.graph_runtime_state import GraphRuntimeState
|
||||
from core.workflow.nodes.event import RunCompletedEvent
|
||||
from core.workflow.nodes.llm.llm_node import LLMNode
|
||||
from extensions.ext_database import db
|
||||
from models.provider import ProviderType
|
||||
from models.workflow import WorkflowNodeExecutionStatus
|
||||
from models.workflow import WorkflowNodeExecutionStatus, WorkflowType
|
||||
|
||||
"""FOR MOCK FIXTURES, DO NOT REMOVE"""
|
||||
from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock
|
||||
from tests.integration_tests.workflow.nodes.__mock.code_executor import setup_code_executor_mock
|
||||
|
||||
|
||||
@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True)
|
||||
def test_execute_llm(setup_openai_mock):
|
||||
node = LLMNode(
|
||||
def init_llm_node(config: dict) -> LLMNode:
|
||||
graph_config = {
|
||||
"edges": [
|
||||
{
|
||||
"id": "start-source-next-target",
|
||||
"source": "start",
|
||||
"target": "llm",
|
||||
},
|
||||
],
|
||||
"nodes": [{"data": {"type": "start"}, "id": "start"}, config],
|
||||
}
|
||||
|
||||
graph = Graph.init(graph_config=graph_config)
|
||||
|
||||
init_params = GraphInitParams(
|
||||
tenant_id="1",
|
||||
app_id="1",
|
||||
workflow_type=WorkflowType.WORKFLOW,
|
||||
workflow_id="1",
|
||||
graph_config=graph_config,
|
||||
user_id="1",
|
||||
invoke_from=InvokeFrom.WEB_APP,
|
||||
user_from=UserFrom.ACCOUNT,
|
||||
invoke_from=InvokeFrom.DEBUGGER,
|
||||
call_depth=0,
|
||||
)
|
||||
|
||||
# construct variable pool
|
||||
variable_pool = VariablePool(
|
||||
system_variables={
|
||||
SystemVariableKey.QUERY: "what's the weather today?",
|
||||
SystemVariableKey.FILES: [],
|
||||
SystemVariableKey.CONVERSATION_ID: "abababa",
|
||||
SystemVariableKey.USER_ID: "aaa",
|
||||
},
|
||||
user_inputs={},
|
||||
environment_variables=[],
|
||||
conversation_variables=[],
|
||||
)
|
||||
variable_pool.add(["abc", "output"], "sunny")
|
||||
|
||||
node = LLMNode(
|
||||
id=str(uuid.uuid4()),
|
||||
graph_init_params=init_params,
|
||||
graph=graph,
|
||||
graph_runtime_state=GraphRuntimeState(variable_pool=variable_pool, start_at=time.perf_counter()),
|
||||
config=config,
|
||||
)
|
||||
|
||||
return node
|
||||
|
||||
|
||||
@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True)
|
||||
def test_execute_llm(setup_openai_mock):
|
||||
node = init_llm_node(
|
||||
config={
|
||||
"id": "llm",
|
||||
"data": {
|
||||
@@ -49,19 +101,6 @@ def test_execute_llm(setup_openai_mock):
|
||||
},
|
||||
)
|
||||
|
||||
# construct variable pool
|
||||
pool = VariablePool(
|
||||
system_variables={
|
||||
SystemVariableKey.QUERY: "what's the weather today?",
|
||||
SystemVariableKey.FILES: [],
|
||||
SystemVariableKey.CONVERSATION_ID: "abababa",
|
||||
SystemVariableKey.USER_ID: "aaa",
|
||||
},
|
||||
user_inputs={},
|
||||
environment_variables=[],
|
||||
)
|
||||
pool.add(["abc", "output"], "sunny")
|
||||
|
||||
credentials = {"openai_api_key": os.environ.get("OPENAI_API_KEY")}
|
||||
|
||||
provider_instance = ModelProviderFactory().get_provider_instance("openai")
|
||||
@@ -80,13 +119,15 @@ def test_execute_llm(setup_openai_mock):
|
||||
model_type_instance=model_type_instance,
|
||||
)
|
||||
model_instance = ModelInstance(provider_model_bundle=provider_model_bundle, model="gpt-3.5-turbo")
|
||||
model_schema = model_type_instance.get_model_schema("gpt-3.5-turbo")
|
||||
assert model_schema is not None
|
||||
model_config = ModelConfigWithCredentialsEntity(
|
||||
model="gpt-3.5-turbo",
|
||||
provider="openai",
|
||||
mode="chat",
|
||||
credentials=credentials,
|
||||
parameters={},
|
||||
model_schema=model_type_instance.get_model_schema("gpt-3.5-turbo"),
|
||||
model_schema=model_schema,
|
||||
provider_model_bundle=provider_model_bundle,
|
||||
)
|
||||
|
||||
@@ -96,11 +137,16 @@ def test_execute_llm(setup_openai_mock):
|
||||
node._fetch_model_config = MagicMock(return_value=(model_instance, model_config))
|
||||
|
||||
# execute node
|
||||
result = node.run(pool)
|
||||
result = node._run()
|
||||
assert isinstance(result, Generator)
|
||||
|
||||
assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
|
||||
assert result.outputs["text"] is not None
|
||||
assert result.outputs["usage"]["total_tokens"] > 0
|
||||
for item in result:
|
||||
if isinstance(item, RunCompletedEvent):
|
||||
assert item.run_result.status == WorkflowNodeExecutionStatus.SUCCEEDED
|
||||
assert item.run_result.process_data is not None
|
||||
assert item.run_result.outputs is not None
|
||||
assert item.run_result.outputs.get("text") is not None
|
||||
assert item.run_result.outputs.get("usage", {})["total_tokens"] > 0
|
||||
|
||||
|
||||
@pytest.mark.parametrize("setup_code_executor_mock", [["none"]], indirect=True)
|
||||
@@ -109,13 +155,7 @@ def test_execute_llm_with_jinja2(setup_code_executor_mock, setup_openai_mock):
|
||||
"""
|
||||
Test execute LLM node with jinja2
|
||||
"""
|
||||
node = LLMNode(
|
||||
tenant_id="1",
|
||||
app_id="1",
|
||||
workflow_id="1",
|
||||
user_id="1",
|
||||
invoke_from=InvokeFrom.WEB_APP,
|
||||
user_from=UserFrom.ACCOUNT,
|
||||
node = init_llm_node(
|
||||
config={
|
||||
"id": "llm",
|
||||
"data": {
|
||||
@@ -149,19 +189,6 @@ def test_execute_llm_with_jinja2(setup_code_executor_mock, setup_openai_mock):
|
||||
},
|
||||
)
|
||||
|
||||
# construct variable pool
|
||||
pool = VariablePool(
|
||||
system_variables={
|
||||
SystemVariableKey.QUERY: "what's the weather today?",
|
||||
SystemVariableKey.FILES: [],
|
||||
SystemVariableKey.CONVERSATION_ID: "abababa",
|
||||
SystemVariableKey.USER_ID: "aaa",
|
||||
},
|
||||
user_inputs={},
|
||||
environment_variables=[],
|
||||
)
|
||||
pool.add(["abc", "output"], "sunny")
|
||||
|
||||
credentials = {"openai_api_key": os.environ.get("OPENAI_API_KEY")}
|
||||
|
||||
provider_instance = ModelProviderFactory().get_provider_instance("openai")
|
||||
@@ -181,14 +208,15 @@ def test_execute_llm_with_jinja2(setup_code_executor_mock, setup_openai_mock):
|
||||
)
|
||||
|
||||
model_instance = ModelInstance(provider_model_bundle=provider_model_bundle, model="gpt-3.5-turbo")
|
||||
|
||||
model_schema = model_type_instance.get_model_schema("gpt-3.5-turbo")
|
||||
assert model_schema is not None
|
||||
model_config = ModelConfigWithCredentialsEntity(
|
||||
model="gpt-3.5-turbo",
|
||||
provider="openai",
|
||||
mode="chat",
|
||||
credentials=credentials,
|
||||
parameters={},
|
||||
model_schema=model_type_instance.get_model_schema("gpt-3.5-turbo"),
|
||||
model_schema=model_schema,
|
||||
provider_model_bundle=provider_model_bundle,
|
||||
)
|
||||
|
||||
@@ -198,8 +226,11 @@ def test_execute_llm_with_jinja2(setup_code_executor_mock, setup_openai_mock):
|
||||
node._fetch_model_config = MagicMock(return_value=(model_instance, model_config))
|
||||
|
||||
# execute node
|
||||
result = node.run(pool)
|
||||
result = node._run()
|
||||
|
||||
assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
|
||||
assert "sunny" in json.dumps(result.process_data)
|
||||
assert "what's the weather today?" in json.dumps(result.process_data)
|
||||
for item in result:
|
||||
if isinstance(item, RunCompletedEvent):
|
||||
assert item.run_result.status == WorkflowNodeExecutionStatus.SUCCEEDED
|
||||
assert item.run_result.process_data is not None
|
||||
assert "sunny" in json.dumps(item.run_result.process_data)
|
||||
assert "what's the weather today?" in json.dumps(item.run_result.process_data)
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
import json
|
||||
import os
|
||||
import time
|
||||
import uuid
|
||||
from typing import Optional
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
@@ -8,19 +10,21 @@ import pytest
|
||||
from core.app.entities.app_invoke_entities import InvokeFrom, ModelConfigWithCredentialsEntity
|
||||
from core.entities.provider_configuration import ProviderConfiguration, ProviderModelBundle
|
||||
from core.entities.provider_entities import CustomConfiguration, CustomProviderConfiguration, SystemConfiguration
|
||||
from core.memory.token_buffer_memory import TokenBufferMemory
|
||||
from core.model_manager import ModelInstance
|
||||
from core.model_runtime.entities.model_entities import ModelType
|
||||
from core.model_runtime.model_providers.model_provider_factory import ModelProviderFactory
|
||||
from core.workflow.entities.node_entities import UserFrom
|
||||
from core.workflow.entities.variable_pool import VariablePool
|
||||
from core.workflow.enums import SystemVariableKey
|
||||
from core.workflow.nodes.base_node import UserFrom
|
||||
from core.workflow.graph_engine.entities.graph import Graph
|
||||
from core.workflow.graph_engine.entities.graph_init_params import GraphInitParams
|
||||
from core.workflow.graph_engine.entities.graph_runtime_state import GraphRuntimeState
|
||||
from core.workflow.nodes.parameter_extractor.parameter_extractor_node import ParameterExtractorNode
|
||||
from extensions.ext_database import db
|
||||
from models.provider import ProviderType
|
||||
|
||||
"""FOR MOCK FIXTURES, DO NOT REMOVE"""
|
||||
from models.workflow import WorkflowNodeExecutionStatus
|
||||
from models.workflow import WorkflowNodeExecutionStatus, WorkflowType
|
||||
from tests.integration_tests.model_runtime.__mock.anthropic import setup_anthropic_mock
|
||||
from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock
|
||||
|
||||
@@ -47,13 +51,15 @@ def get_mocked_fetch_model_config(
|
||||
model_type_instance=model_type_instance,
|
||||
)
|
||||
model_instance = ModelInstance(provider_model_bundle=provider_model_bundle, model=model)
|
||||
model_schema = model_type_instance.get_model_schema(model)
|
||||
assert model_schema is not None
|
||||
model_config = ModelConfigWithCredentialsEntity(
|
||||
model=model,
|
||||
provider=provider,
|
||||
mode=mode,
|
||||
credentials=credentials,
|
||||
parameters={},
|
||||
model_schema=model_type_instance.get_model_schema(model),
|
||||
model_schema=model_schema,
|
||||
provider_model_bundle=provider_model_bundle,
|
||||
)
|
||||
|
||||
@@ -74,18 +80,62 @@ def get_mocked_fetch_memory(memory_text: str):
|
||||
return MagicMock(return_value=MemoryMock())
|
||||
|
||||
|
||||
def init_parameter_extractor_node(config: dict):
|
||||
graph_config = {
|
||||
"edges": [
|
||||
{
|
||||
"id": "start-source-next-target",
|
||||
"source": "start",
|
||||
"target": "llm",
|
||||
},
|
||||
],
|
||||
"nodes": [{"data": {"type": "start"}, "id": "start"}, config],
|
||||
}
|
||||
|
||||
graph = Graph.init(graph_config=graph_config)
|
||||
|
||||
init_params = GraphInitParams(
|
||||
tenant_id="1",
|
||||
app_id="1",
|
||||
workflow_type=WorkflowType.WORKFLOW,
|
||||
workflow_id="1",
|
||||
graph_config=graph_config,
|
||||
user_id="1",
|
||||
user_from=UserFrom.ACCOUNT,
|
||||
invoke_from=InvokeFrom.DEBUGGER,
|
||||
call_depth=0,
|
||||
)
|
||||
|
||||
# construct variable pool
|
||||
variable_pool = VariablePool(
|
||||
system_variables={
|
||||
SystemVariableKey.QUERY: "what's the weather in SF",
|
||||
SystemVariableKey.FILES: [],
|
||||
SystemVariableKey.CONVERSATION_ID: "abababa",
|
||||
SystemVariableKey.USER_ID: "aaa",
|
||||
},
|
||||
user_inputs={},
|
||||
environment_variables=[],
|
||||
conversation_variables=[],
|
||||
)
|
||||
variable_pool.add(["a", "b123", "args1"], 1)
|
||||
variable_pool.add(["a", "b123", "args2"], 2)
|
||||
|
||||
return ParameterExtractorNode(
|
||||
id=str(uuid.uuid4()),
|
||||
graph_init_params=init_params,
|
||||
graph=graph,
|
||||
graph_runtime_state=GraphRuntimeState(variable_pool=variable_pool, start_at=time.perf_counter()),
|
||||
config=config,
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True)
|
||||
def test_function_calling_parameter_extractor(setup_openai_mock):
|
||||
"""
|
||||
Test function calling for parameter extractor.
|
||||
"""
|
||||
node = ParameterExtractorNode(
|
||||
tenant_id="1",
|
||||
app_id="1",
|
||||
workflow_id="1",
|
||||
user_id="1",
|
||||
invoke_from=InvokeFrom.WEB_APP,
|
||||
user_from=UserFrom.ACCOUNT,
|
||||
node = init_parameter_extractor_node(
|
||||
config={
|
||||
"id": "llm",
|
||||
"data": {
|
||||
@@ -98,7 +148,7 @@ def test_function_calling_parameter_extractor(setup_openai_mock):
|
||||
"reasoning_mode": "function_call",
|
||||
"memory": None,
|
||||
},
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
node._fetch_model_config = get_mocked_fetch_model_config(
|
||||
@@ -121,9 +171,10 @@ def test_function_calling_parameter_extractor(setup_openai_mock):
|
||||
environment_variables=[],
|
||||
)
|
||||
|
||||
result = node.run(pool)
|
||||
result = node._run()
|
||||
|
||||
assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
|
||||
assert result.outputs is not None
|
||||
assert result.outputs.get("location") == "kawaii"
|
||||
assert result.outputs.get("__reason") == None
|
||||
|
||||
@@ -133,13 +184,7 @@ def test_instructions(setup_openai_mock):
|
||||
"""
|
||||
Test chat parameter extractor.
|
||||
"""
|
||||
node = ParameterExtractorNode(
|
||||
tenant_id="1",
|
||||
app_id="1",
|
||||
workflow_id="1",
|
||||
user_id="1",
|
||||
invoke_from=InvokeFrom.WEB_APP,
|
||||
user_from=UserFrom.ACCOUNT,
|
||||
node = init_parameter_extractor_node(
|
||||
config={
|
||||
"id": "llm",
|
||||
"data": {
|
||||
@@ -163,29 +208,19 @@ def test_instructions(setup_openai_mock):
|
||||
)
|
||||
db.session.close = MagicMock()
|
||||
|
||||
# construct variable pool
|
||||
pool = VariablePool(
|
||||
system_variables={
|
||||
SystemVariableKey.QUERY: "what's the weather in SF",
|
||||
SystemVariableKey.FILES: [],
|
||||
SystemVariableKey.CONVERSATION_ID: "abababa",
|
||||
SystemVariableKey.USER_ID: "aaa",
|
||||
},
|
||||
user_inputs={},
|
||||
environment_variables=[],
|
||||
)
|
||||
|
||||
result = node.run(pool)
|
||||
result = node._run()
|
||||
|
||||
assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
|
||||
assert result.outputs is not None
|
||||
assert result.outputs.get("location") == "kawaii"
|
||||
assert result.outputs.get("__reason") == None
|
||||
|
||||
process_data = result.process_data
|
||||
|
||||
assert process_data is not None
|
||||
process_data.get("prompts")
|
||||
|
||||
for prompt in process_data.get("prompts"):
|
||||
for prompt in process_data.get("prompts", []):
|
||||
if prompt.get("role") == "system":
|
||||
assert "what's the weather in SF" in prompt.get("text")
|
||||
|
||||
@@ -195,13 +230,7 @@ def test_chat_parameter_extractor(setup_anthropic_mock):
|
||||
"""
|
||||
Test chat parameter extractor.
|
||||
"""
|
||||
node = ParameterExtractorNode(
|
||||
tenant_id="1",
|
||||
app_id="1",
|
||||
workflow_id="1",
|
||||
user_id="1",
|
||||
invoke_from=InvokeFrom.WEB_APP,
|
||||
user_from=UserFrom.ACCOUNT,
|
||||
node = init_parameter_extractor_node(
|
||||
config={
|
||||
"id": "llm",
|
||||
"data": {
|
||||
@@ -225,27 +254,17 @@ def test_chat_parameter_extractor(setup_anthropic_mock):
|
||||
)
|
||||
db.session.close = MagicMock()
|
||||
|
||||
# construct variable pool
|
||||
pool = VariablePool(
|
||||
system_variables={
|
||||
SystemVariableKey.QUERY: "what's the weather in SF",
|
||||
SystemVariableKey.FILES: [],
|
||||
SystemVariableKey.CONVERSATION_ID: "abababa",
|
||||
SystemVariableKey.USER_ID: "aaa",
|
||||
},
|
||||
user_inputs={},
|
||||
environment_variables=[],
|
||||
)
|
||||
|
||||
result = node.run(pool)
|
||||
result = node._run()
|
||||
|
||||
assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
|
||||
assert result.outputs is not None
|
||||
assert result.outputs.get("location") == ""
|
||||
assert (
|
||||
result.outputs.get("__reason")
|
||||
== "Failed to extract result from function call or text response, using empty result."
|
||||
)
|
||||
prompts = result.process_data.get("prompts")
|
||||
assert result.process_data is not None
|
||||
prompts = result.process_data.get("prompts", [])
|
||||
|
||||
for prompt in prompts:
|
||||
if prompt.get("role") == "user":
|
||||
@@ -258,13 +277,7 @@ def test_completion_parameter_extractor(setup_openai_mock):
|
||||
"""
|
||||
Test completion parameter extractor.
|
||||
"""
|
||||
node = ParameterExtractorNode(
|
||||
tenant_id="1",
|
||||
app_id="1",
|
||||
workflow_id="1",
|
||||
user_id="1",
|
||||
invoke_from=InvokeFrom.WEB_APP,
|
||||
user_from=UserFrom.ACCOUNT,
|
||||
node = init_parameter_extractor_node(
|
||||
config={
|
||||
"id": "llm",
|
||||
"data": {
|
||||
@@ -293,28 +306,18 @@ def test_completion_parameter_extractor(setup_openai_mock):
|
||||
)
|
||||
db.session.close = MagicMock()
|
||||
|
||||
# construct variable pool
|
||||
pool = VariablePool(
|
||||
system_variables={
|
||||
SystemVariableKey.QUERY: "what's the weather in SF",
|
||||
SystemVariableKey.FILES: [],
|
||||
SystemVariableKey.CONVERSATION_ID: "abababa",
|
||||
SystemVariableKey.USER_ID: "aaa",
|
||||
},
|
||||
user_inputs={},
|
||||
environment_variables=[],
|
||||
)
|
||||
|
||||
result = node.run(pool)
|
||||
result = node._run()
|
||||
|
||||
assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
|
||||
assert result.outputs is not None
|
||||
assert result.outputs.get("location") == ""
|
||||
assert (
|
||||
result.outputs.get("__reason")
|
||||
== "Failed to extract result from function call or text response, using empty result."
|
||||
)
|
||||
assert len(result.process_data.get("prompts")) == 1
|
||||
assert "SF" in result.process_data.get("prompts")[0].get("text")
|
||||
assert result.process_data is not None
|
||||
assert len(result.process_data.get("prompts", [])) == 1
|
||||
assert "SF" in result.process_data.get("prompts", [])[0].get("text")
|
||||
|
||||
|
||||
def test_extract_json_response():
|
||||
@@ -322,13 +325,7 @@ def test_extract_json_response():
|
||||
Test extract json response.
|
||||
"""
|
||||
|
||||
node = ParameterExtractorNode(
|
||||
tenant_id="1",
|
||||
app_id="1",
|
||||
workflow_id="1",
|
||||
user_id="1",
|
||||
invoke_from=InvokeFrom.WEB_APP,
|
||||
user_from=UserFrom.ACCOUNT,
|
||||
node = init_parameter_extractor_node(
|
||||
config={
|
||||
"id": "llm",
|
||||
"data": {
|
||||
@@ -357,6 +354,7 @@ def test_extract_json_response():
|
||||
hello world.
|
||||
""")
|
||||
|
||||
assert result is not None
|
||||
assert result["location"] == "kawaii"
|
||||
|
||||
|
||||
@@ -365,13 +363,7 @@ def test_chat_parameter_extractor_with_memory(setup_anthropic_mock):
|
||||
"""
|
||||
Test chat parameter extractor with memory.
|
||||
"""
|
||||
node = ParameterExtractorNode(
|
||||
tenant_id="1",
|
||||
app_id="1",
|
||||
workflow_id="1",
|
||||
user_id="1",
|
||||
invoke_from=InvokeFrom.WEB_APP,
|
||||
user_from=UserFrom.ACCOUNT,
|
||||
node = init_parameter_extractor_node(
|
||||
config={
|
||||
"id": "llm",
|
||||
"data": {
|
||||
@@ -396,27 +388,17 @@ def test_chat_parameter_extractor_with_memory(setup_anthropic_mock):
|
||||
node._fetch_memory = get_mocked_fetch_memory("customized memory")
|
||||
db.session.close = MagicMock()
|
||||
|
||||
# construct variable pool
|
||||
pool = VariablePool(
|
||||
system_variables={
|
||||
SystemVariableKey.QUERY: "what's the weather in SF",
|
||||
SystemVariableKey.FILES: [],
|
||||
SystemVariableKey.CONVERSATION_ID: "abababa",
|
||||
SystemVariableKey.USER_ID: "aaa",
|
||||
},
|
||||
user_inputs={},
|
||||
environment_variables=[],
|
||||
)
|
||||
|
||||
result = node.run(pool)
|
||||
result = node._run()
|
||||
|
||||
assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
|
||||
assert result.outputs is not None
|
||||
assert result.outputs.get("location") == ""
|
||||
assert (
|
||||
result.outputs.get("__reason")
|
||||
== "Failed to extract result from function call or text response, using empty result."
|
||||
)
|
||||
prompts = result.process_data.get("prompts")
|
||||
assert result.process_data is not None
|
||||
prompts = result.process_data.get("prompts", [])
|
||||
|
||||
latest_role = None
|
||||
for prompt in prompts:
|
||||
|
||||
@@ -1,46 +1,84 @@
|
||||
import time
|
||||
import uuid
|
||||
|
||||
import pytest
|
||||
|
||||
from core.app.entities.app_invoke_entities import InvokeFrom
|
||||
from core.workflow.entities.node_entities import UserFrom
|
||||
from core.workflow.entities.variable_pool import VariablePool
|
||||
from core.workflow.nodes.base_node import UserFrom
|
||||
from core.workflow.enums import SystemVariableKey
|
||||
from core.workflow.graph_engine.entities.graph import Graph
|
||||
from core.workflow.graph_engine.entities.graph_init_params import GraphInitParams
|
||||
from core.workflow.graph_engine.entities.graph_runtime_state import GraphRuntimeState
|
||||
from core.workflow.nodes.template_transform.template_transform_node import TemplateTransformNode
|
||||
from models.workflow import WorkflowNodeExecutionStatus
|
||||
from models.workflow import WorkflowNodeExecutionStatus, WorkflowType
|
||||
from tests.integration_tests.workflow.nodes.__mock.code_executor import setup_code_executor_mock
|
||||
|
||||
|
||||
@pytest.mark.parametrize("setup_code_executor_mock", [["none"]], indirect=True)
|
||||
def test_execute_code(setup_code_executor_mock):
|
||||
code = """{{args2}}"""
|
||||
node = TemplateTransformNode(
|
||||
config = {
|
||||
"id": "1",
|
||||
"data": {
|
||||
"title": "123",
|
||||
"variables": [
|
||||
{
|
||||
"variable": "args1",
|
||||
"value_selector": ["1", "123", "args1"],
|
||||
},
|
||||
{"variable": "args2", "value_selector": ["1", "123", "args2"]},
|
||||
],
|
||||
"template": code,
|
||||
},
|
||||
}
|
||||
|
||||
graph_config = {
|
||||
"edges": [
|
||||
{
|
||||
"id": "start-source-next-target",
|
||||
"source": "start",
|
||||
"target": "1",
|
||||
},
|
||||
],
|
||||
"nodes": [{"data": {"type": "start"}, "id": "start"}, config],
|
||||
}
|
||||
|
||||
graph = Graph.init(graph_config=graph_config)
|
||||
|
||||
init_params = GraphInitParams(
|
||||
tenant_id="1",
|
||||
app_id="1",
|
||||
workflow_type=WorkflowType.WORKFLOW,
|
||||
workflow_id="1",
|
||||
graph_config=graph_config,
|
||||
user_id="1",
|
||||
invoke_from=InvokeFrom.WEB_APP,
|
||||
user_from=UserFrom.END_USER,
|
||||
config={
|
||||
"id": "1",
|
||||
"data": {
|
||||
"title": "123",
|
||||
"variables": [
|
||||
{
|
||||
"variable": "args1",
|
||||
"value_selector": ["1", "123", "args1"],
|
||||
},
|
||||
{"variable": "args2", "value_selector": ["1", "123", "args2"]},
|
||||
],
|
||||
"template": code,
|
||||
},
|
||||
},
|
||||
user_from=UserFrom.ACCOUNT,
|
||||
invoke_from=InvokeFrom.DEBUGGER,
|
||||
call_depth=0,
|
||||
)
|
||||
|
||||
# construct variable pool
|
||||
pool = VariablePool(system_variables={}, user_inputs={}, environment_variables=[])
|
||||
pool.add(["1", "123", "args1"], 1)
|
||||
pool.add(["1", "123", "args2"], 3)
|
||||
variable_pool = VariablePool(
|
||||
system_variables={SystemVariableKey.FILES: [], SystemVariableKey.USER_ID: "aaa"},
|
||||
user_inputs={},
|
||||
environment_variables=[],
|
||||
conversation_variables=[],
|
||||
)
|
||||
variable_pool.add(["1", "123", "args1"], 1)
|
||||
variable_pool.add(["1", "123", "args2"], 3)
|
||||
|
||||
node = TemplateTransformNode(
|
||||
id=str(uuid.uuid4()),
|
||||
graph_init_params=init_params,
|
||||
graph=graph,
|
||||
graph_runtime_state=GraphRuntimeState(variable_pool=variable_pool, start_at=time.perf_counter()),
|
||||
config=config,
|
||||
)
|
||||
|
||||
# execute node
|
||||
result = node.run(pool)
|
||||
result = node._run()
|
||||
|
||||
assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
|
||||
assert result.outputs is not None
|
||||
assert result.outputs["output"] == "3"
|
||||
|
||||
@@ -1,21 +1,62 @@
|
||||
import time
|
||||
import uuid
|
||||
|
||||
from core.app.entities.app_invoke_entities import InvokeFrom
|
||||
from core.workflow.entities.node_entities import NodeRunResult, UserFrom
|
||||
from core.workflow.entities.variable_pool import VariablePool
|
||||
from core.workflow.nodes.base_node import UserFrom
|
||||
from core.workflow.enums import SystemVariableKey
|
||||
from core.workflow.graph_engine.entities.graph import Graph
|
||||
from core.workflow.graph_engine.entities.graph_init_params import GraphInitParams
|
||||
from core.workflow.graph_engine.entities.graph_runtime_state import GraphRuntimeState
|
||||
from core.workflow.nodes.tool.tool_node import ToolNode
|
||||
from models.workflow import WorkflowNodeExecutionStatus
|
||||
from models.workflow import WorkflowNodeExecutionStatus, WorkflowType
|
||||
|
||||
|
||||
def init_tool_node(config: dict):
|
||||
graph_config = {
|
||||
"edges": [
|
||||
{
|
||||
"id": "start-source-next-target",
|
||||
"source": "start",
|
||||
"target": "1",
|
||||
},
|
||||
],
|
||||
"nodes": [{"data": {"type": "start"}, "id": "start"}, config],
|
||||
}
|
||||
|
||||
graph = Graph.init(graph_config=graph_config)
|
||||
|
||||
init_params = GraphInitParams(
|
||||
tenant_id="1",
|
||||
app_id="1",
|
||||
workflow_type=WorkflowType.WORKFLOW,
|
||||
workflow_id="1",
|
||||
graph_config=graph_config,
|
||||
user_id="1",
|
||||
user_from=UserFrom.ACCOUNT,
|
||||
invoke_from=InvokeFrom.DEBUGGER,
|
||||
call_depth=0,
|
||||
)
|
||||
|
||||
# construct variable pool
|
||||
variable_pool = VariablePool(
|
||||
system_variables={SystemVariableKey.FILES: [], SystemVariableKey.USER_ID: "aaa"},
|
||||
user_inputs={},
|
||||
environment_variables=[],
|
||||
conversation_variables=[],
|
||||
)
|
||||
|
||||
return ToolNode(
|
||||
id=str(uuid.uuid4()),
|
||||
graph_init_params=init_params,
|
||||
graph=graph,
|
||||
graph_runtime_state=GraphRuntimeState(variable_pool=variable_pool, start_at=time.perf_counter()),
|
||||
config=config,
|
||||
)
|
||||
|
||||
|
||||
def test_tool_variable_invoke():
|
||||
pool = VariablePool(system_variables={}, user_inputs={}, environment_variables=[])
|
||||
pool.add(["1", "123", "args1"], "1+1")
|
||||
|
||||
node = ToolNode(
|
||||
tenant_id="1",
|
||||
app_id="1",
|
||||
workflow_id="1",
|
||||
user_id="1",
|
||||
invoke_from=InvokeFrom.WEB_APP,
|
||||
user_from=UserFrom.ACCOUNT,
|
||||
node = init_tool_node(
|
||||
config={
|
||||
"id": "1",
|
||||
"data": {
|
||||
@@ -34,28 +75,22 @@ def test_tool_variable_invoke():
|
||||
}
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
# execute node
|
||||
result = node.run(pool)
|
||||
node.graph_runtime_state.variable_pool.add(["1", "123", "args1"], "1+1")
|
||||
|
||||
# execute node
|
||||
result = node._run()
|
||||
assert isinstance(result, NodeRunResult)
|
||||
assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
|
||||
assert result.outputs is not None
|
||||
assert "2" in result.outputs["text"]
|
||||
assert result.outputs["files"] == []
|
||||
|
||||
|
||||
def test_tool_mixed_invoke():
|
||||
pool = VariablePool(system_variables={}, user_inputs={}, environment_variables=[])
|
||||
pool.add(["1", "args1"], "1+1")
|
||||
|
||||
node = ToolNode(
|
||||
tenant_id="1",
|
||||
app_id="1",
|
||||
workflow_id="1",
|
||||
user_id="1",
|
||||
invoke_from=InvokeFrom.WEB_APP,
|
||||
user_from=UserFrom.ACCOUNT,
|
||||
node = init_tool_node(
|
||||
config={
|
||||
"id": "1",
|
||||
"data": {
|
||||
@@ -74,12 +109,15 @@ def test_tool_mixed_invoke():
|
||||
}
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
# execute node
|
||||
result = node.run(pool)
|
||||
node.graph_runtime_state.variable_pool.add(["1", "args1"], "1+1")
|
||||
|
||||
# execute node
|
||||
result = node._run()
|
||||
assert isinstance(result, NodeRunResult)
|
||||
assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
|
||||
assert result.outputs is not None
|
||||
assert "2" in result.outputs["text"]
|
||||
assert result.outputs["files"] == []
|
||||
|
||||
@@ -1,7 +1,24 @@
import os

import pytest
from flask import Flask

# Getting the absolute path of the current file's directory
ABS_PATH = os.path.dirname(os.path.abspath(__file__))

# Getting the absolute path of the project's root directory
PROJECT_DIR = os.path.abspath(os.path.join(ABS_PATH, os.pardir, os.pardir))

CACHED_APP = Flask(__name__)
CACHED_APP.config.update({"TESTING": True})


@pytest.fixture()
def app() -> Flask:
    return CACHED_APP


@pytest.fixture(autouse=True)
def _provide_app_context(app: Flask):
    with app.app_context():
        yield
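A hypothetical usage sketch (the test below is illustrative only, not part of this commit): because _provide_app_context is an autouse fixture, every test collected under this conftest runs inside an application context on the cached Flask app, so Flask globals resolve without any explicit setup in the test body.

from flask import current_app


def test_runs_inside_app_context():
    # no fixture arguments needed; _provide_app_context is autouse
    assert current_app.config["TESTING"] is True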
api/tests/unit_tests/core/workflow/graph_engine/test_graph.py (new file, 791 lines)
@@ -0,0 +1,791 @@
from core.workflow.graph_engine.entities.graph import Graph
from core.workflow.graph_engine.entities.run_condition import RunCondition
from core.workflow.utils.condition.entities import Condition


def test_init():
    graph_config = {
        "edges": [
            {
                "id": "llm-source-answer-target",
                "source": "llm",
                "target": "answer",
            },
            {
                "id": "start-source-qc-target",
                "source": "start",
                "target": "qc",
            },
            {
                "id": "qc-1-llm-target",
                "source": "qc",
                "sourceHandle": "1",
                "target": "llm",
            },
            {
                "id": "qc-2-http-target",
                "source": "qc",
                "sourceHandle": "2",
                "target": "http",
            },
            {
                "id": "http-source-answer2-target",
                "source": "http",
                "target": "answer2",
            },
        ],
        "nodes": [
            {"data": {"type": "start"}, "id": "start"},
            {
                "data": {
                    "type": "llm",
                },
                "id": "llm",
            },
            {
                "data": {"type": "answer", "title": "answer", "answer": "1"},
                "id": "answer",
            },
            {
                "data": {"type": "question-classifier"},
                "id": "qc",
            },
            {
                "data": {
                    "type": "http-request",
                },
                "id": "http",
            },
            {
                "data": {"type": "answer", "title": "answer", "answer": "1"},
                "id": "answer2",
            },
        ],
    }

    graph = Graph.init(graph_config=graph_config)

    start_node_id = "start"

    assert graph.root_node_id == start_node_id
    assert graph.edge_mapping.get(start_node_id)[0].target_node_id == "qc"
    assert {"llm", "http"} == {node.target_node_id for node in graph.edge_mapping.get("qc")}

def test__init_iteration_graph():
|
||||
graph_config = {
|
||||
"edges": [
|
||||
{
|
||||
"id": "llm-answer",
|
||||
"source": "llm",
|
||||
"sourceHandle": "source",
|
||||
"target": "answer",
|
||||
},
|
||||
{
|
||||
"id": "iteration-source-llm-target",
|
||||
"source": "iteration",
|
||||
"sourceHandle": "source",
|
||||
"target": "llm",
|
||||
},
|
||||
{
|
||||
"id": "template-transform-in-iteration-source-llm-in-iteration-target",
|
||||
"source": "template-transform-in-iteration",
|
||||
"sourceHandle": "source",
|
||||
"target": "llm-in-iteration",
|
||||
},
|
||||
{
|
||||
"id": "llm-in-iteration-source-answer-in-iteration-target",
|
||||
"source": "llm-in-iteration",
|
||||
"sourceHandle": "source",
|
||||
"target": "answer-in-iteration",
|
||||
},
|
||||
{
|
||||
"id": "start-source-code-target",
|
||||
"source": "start",
|
||||
"sourceHandle": "source",
|
||||
"target": "code",
|
||||
},
|
||||
{
|
||||
"id": "code-source-iteration-target",
|
||||
"source": "code",
|
||||
"sourceHandle": "source",
|
||||
"target": "iteration",
|
||||
},
|
||||
],
|
||||
"nodes": [
|
||||
{
|
||||
"data": {
|
||||
"type": "start",
|
||||
},
|
||||
"id": "start",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"type": "llm",
|
||||
},
|
||||
"id": "llm",
|
||||
},
|
||||
{
|
||||
"data": {"type": "answer", "title": "answer", "answer": "1"},
|
||||
"id": "answer",
|
||||
},
|
||||
{
|
||||
"data": {"type": "iteration"},
|
||||
"id": "iteration",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"type": "template-transform",
|
||||
},
|
||||
"id": "template-transform-in-iteration",
|
||||
"parentId": "iteration",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"type": "llm",
|
||||
},
|
||||
"id": "llm-in-iteration",
|
||||
"parentId": "iteration",
|
||||
},
|
||||
{
|
||||
"data": {"type": "answer", "title": "answer", "answer": "1"},
|
||||
"id": "answer-in-iteration",
|
||||
"parentId": "iteration",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"type": "code",
|
||||
},
|
||||
"id": "code",
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
graph = Graph.init(graph_config=graph_config, root_node_id="template-transform-in-iteration")
|
||||
graph.add_extra_edge(
|
||||
source_node_id="answer-in-iteration",
|
||||
target_node_id="template-transform-in-iteration",
|
||||
run_condition=RunCondition(
|
||||
type="condition",
|
||||
conditions=[Condition(variable_selector=["iteration", "index"], comparison_operator="≤", value="5")],
|
||||
),
|
||||
)
|
||||
|
||||
# iteration:
|
||||
# [template-transform-in-iteration -> llm-in-iteration -> answer-in-iteration]
|
||||
|
||||
assert graph.root_node_id == "template-transform-in-iteration"
|
||||
assert graph.edge_mapping.get("template-transform-in-iteration")[0].target_node_id == "llm-in-iteration"
|
||||
assert graph.edge_mapping.get("llm-in-iteration")[0].target_node_id == "answer-in-iteration"
|
||||
assert graph.edge_mapping.get("answer-in-iteration")[0].target_node_id == "template-transform-in-iteration"
|
||||
|
||||
|
||||
def test_parallels_graph():
|
||||
graph_config = {
|
||||
"edges": [
|
||||
{
|
||||
"id": "start-source-llm1-target",
|
||||
"source": "start",
|
||||
"target": "llm1",
|
||||
},
|
||||
{
|
||||
"id": "start-source-llm2-target",
|
||||
"source": "start",
|
||||
"target": "llm2",
|
||||
},
|
||||
{
|
||||
"id": "start-source-llm3-target",
|
||||
"source": "start",
|
||||
"target": "llm3",
|
||||
},
|
||||
{
|
||||
"id": "llm1-source-answer-target",
|
||||
"source": "llm1",
|
||||
"target": "answer",
|
||||
},
|
||||
{
|
||||
"id": "llm2-source-answer-target",
|
||||
"source": "llm2",
|
||||
"target": "answer",
|
||||
},
|
||||
{
|
||||
"id": "llm3-source-answer-target",
|
||||
"source": "llm3",
|
||||
"target": "answer",
|
||||
},
|
||||
],
|
||||
"nodes": [
|
||||
{"data": {"type": "start"}, "id": "start"},
|
||||
{
|
||||
"data": {
|
||||
"type": "llm",
|
||||
},
|
||||
"id": "llm1",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"type": "llm",
|
||||
},
|
||||
"id": "llm2",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"type": "llm",
|
||||
},
|
||||
"id": "llm3",
|
||||
},
|
||||
{
|
||||
"data": {"type": "answer", "title": "answer", "answer": "1"},
|
||||
"id": "answer",
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
graph = Graph.init(graph_config=graph_config)
|
||||
|
||||
assert graph.root_node_id == "start"
|
||||
for i in range(3):
|
||||
start_edges = graph.edge_mapping.get("start")
|
||||
assert start_edges is not None
|
||||
assert start_edges[i].target_node_id == f"llm{i+1}"
|
||||
|
||||
llm_edges = graph.edge_mapping.get(f"llm{i+1}")
|
||||
assert llm_edges is not None
|
||||
assert llm_edges[0].target_node_id == "answer"
|
||||
|
||||
assert len(graph.parallel_mapping) == 1
|
||||
assert len(graph.node_parallel_mapping) == 3
|
||||
|
||||
for node_id in ["llm1", "llm2", "llm3"]:
|
||||
assert node_id in graph.node_parallel_mapping
|
||||
|
||||
|
||||
def test_parallels_graph2():
|
||||
graph_config = {
|
||||
"edges": [
|
||||
{
|
||||
"id": "start-source-llm1-target",
|
||||
"source": "start",
|
||||
"target": "llm1",
|
||||
},
|
||||
{
|
||||
"id": "start-source-llm2-target",
|
||||
"source": "start",
|
||||
"target": "llm2",
|
||||
},
|
||||
{
|
||||
"id": "start-source-llm3-target",
|
||||
"source": "start",
|
||||
"target": "llm3",
|
||||
},
|
||||
{
|
||||
"id": "llm1-source-answer-target",
|
||||
"source": "llm1",
|
||||
"target": "answer",
|
||||
},
|
||||
{
|
||||
"id": "llm2-source-answer-target",
|
||||
"source": "llm2",
|
||||
"target": "answer",
|
||||
},
|
||||
],
|
||||
"nodes": [
|
||||
{"data": {"type": "start"}, "id": "start"},
|
||||
{
|
||||
"data": {
|
||||
"type": "llm",
|
||||
},
|
||||
"id": "llm1",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"type": "llm",
|
||||
},
|
||||
"id": "llm2",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"type": "llm",
|
||||
},
|
||||
"id": "llm3",
|
||||
},
|
||||
{
|
||||
"data": {"type": "answer", "title": "answer", "answer": "1"},
|
||||
"id": "answer",
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
graph = Graph.init(graph_config=graph_config)
|
||||
|
||||
assert graph.root_node_id == "start"
|
||||
for i in range(3):
|
||||
assert graph.edge_mapping.get("start")[i].target_node_id == f"llm{i + 1}"
|
||||
|
||||
if i < 2:
|
||||
assert graph.edge_mapping.get(f"llm{i + 1}") is not None
|
||||
assert graph.edge_mapping.get(f"llm{i + 1}")[0].target_node_id == "answer"
|
||||
|
||||
assert len(graph.parallel_mapping) == 1
|
||||
assert len(graph.node_parallel_mapping) == 3
|
||||
|
||||
for node_id in ["llm1", "llm2", "llm3"]:
|
||||
assert node_id in graph.node_parallel_mapping
|
||||
|
||||
|
||||
def test_parallels_graph3():
|
||||
graph_config = {
|
||||
"edges": [
|
||||
{
|
||||
"id": "start-source-llm1-target",
|
||||
"source": "start",
|
||||
"target": "llm1",
|
||||
},
|
||||
{
|
||||
"id": "start-source-llm2-target",
|
||||
"source": "start",
|
||||
"target": "llm2",
|
||||
},
|
||||
{
|
||||
"id": "start-source-llm3-target",
|
||||
"source": "start",
|
||||
"target": "llm3",
|
||||
},
|
||||
],
|
||||
"nodes": [
|
||||
{"data": {"type": "start"}, "id": "start"},
|
||||
{
|
||||
"data": {
|
||||
"type": "llm",
|
||||
},
|
||||
"id": "llm1",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"type": "llm",
|
||||
},
|
||||
"id": "llm2",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"type": "llm",
|
||||
},
|
||||
"id": "llm3",
|
||||
},
|
||||
{
|
||||
"data": {"type": "answer", "title": "answer", "answer": "1"},
|
||||
"id": "answer",
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
graph = Graph.init(graph_config=graph_config)
|
||||
|
||||
assert graph.root_node_id == "start"
|
||||
for i in range(3):
|
||||
assert graph.edge_mapping.get("start")[i].target_node_id == f"llm{i + 1}"
|
||||
|
||||
assert len(graph.parallel_mapping) == 1
|
||||
assert len(graph.node_parallel_mapping) == 3
|
||||
|
||||
for node_id in ["llm1", "llm2", "llm3"]:
|
||||
assert node_id in graph.node_parallel_mapping
|
||||
|
||||
|
||||
def test_parallels_graph4():
|
||||
graph_config = {
|
||||
"edges": [
|
||||
{
|
||||
"id": "start-source-llm1-target",
|
||||
"source": "start",
|
||||
"target": "llm1",
|
||||
},
|
||||
{
|
||||
"id": "start-source-llm2-target",
|
||||
"source": "start",
|
||||
"target": "llm2",
|
||||
},
|
||||
{
|
||||
"id": "start-source-llm3-target",
|
||||
"source": "start",
|
||||
"target": "llm3",
|
||||
},
|
||||
{
|
||||
"id": "llm1-source-answer-target",
|
||||
"source": "llm1",
|
||||
"target": "code1",
|
||||
},
|
||||
{
|
||||
"id": "llm2-source-answer-target",
|
||||
"source": "llm2",
|
||||
"target": "code2",
|
||||
},
|
||||
{
|
||||
"id": "llm3-source-code3-target",
|
||||
"source": "llm3",
|
||||
"target": "code3",
|
||||
},
|
||||
{
|
||||
"id": "code1-source-answer-target",
|
||||
"source": "code1",
|
||||
"target": "answer",
|
||||
},
|
||||
{
|
||||
"id": "code2-source-answer-target",
|
||||
"source": "code2",
|
||||
"target": "answer",
|
||||
},
|
||||
{
|
||||
"id": "code3-source-answer-target",
|
||||
"source": "code3",
|
||||
"target": "answer",
|
||||
},
|
||||
],
|
||||
"nodes": [
|
||||
{"data": {"type": "start"}, "id": "start"},
|
||||
{
|
||||
"data": {
|
||||
"type": "llm",
|
||||
},
|
||||
"id": "llm1",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"type": "code",
|
||||
},
|
||||
"id": "code1",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"type": "llm",
|
||||
},
|
||||
"id": "llm2",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"type": "code",
|
||||
},
|
||||
"id": "code2",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"type": "llm",
|
||||
},
|
||||
"id": "llm3",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"type": "code",
|
||||
},
|
||||
"id": "code3",
|
||||
},
|
||||
{
|
||||
"data": {"type": "answer", "title": "answer", "answer": "1"},
|
||||
"id": "answer",
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
graph = Graph.init(graph_config=graph_config)
|
||||
|
||||
assert graph.root_node_id == "start"
|
||||
for i in range(3):
|
||||
assert graph.edge_mapping.get("start")[i].target_node_id == f"llm{i + 1}"
|
||||
assert graph.edge_mapping.get(f"llm{i + 1}") is not None
|
||||
assert graph.edge_mapping.get(f"llm{i + 1}")[0].target_node_id == f"code{i + 1}"
|
||||
assert graph.edge_mapping.get(f"code{i + 1}") is not None
|
||||
assert graph.edge_mapping.get(f"code{i + 1}")[0].target_node_id == "answer"
|
||||
|
||||
assert len(graph.parallel_mapping) == 1
|
||||
assert len(graph.node_parallel_mapping) == 6
|
||||
|
||||
for node_id in ["llm1", "llm2", "llm3", "code1", "code2", "code3"]:
|
||||
assert node_id in graph.node_parallel_mapping
|
||||
|
||||
|
||||
def test_parallels_graph5():
|
||||
graph_config = {
|
||||
"edges": [
|
||||
{
|
||||
"id": "start-source-llm1-target",
|
||||
"source": "start",
|
||||
"target": "llm1",
|
||||
},
|
||||
{
|
||||
"id": "start-source-llm2-target",
|
||||
"source": "start",
|
||||
"target": "llm2",
|
||||
},
|
||||
{
|
||||
"id": "start-source-llm3-target",
|
||||
"source": "start",
|
||||
"target": "llm3",
|
||||
},
|
||||
{
|
||||
"id": "start-source-llm3-target",
|
||||
"source": "start",
|
||||
"target": "llm4",
|
||||
},
|
||||
{
|
||||
"id": "start-source-llm3-target",
|
||||
"source": "start",
|
||||
"target": "llm5",
|
||||
},
|
||||
{
|
||||
"id": "llm1-source-code1-target",
|
||||
"source": "llm1",
|
||||
"target": "code1",
|
||||
},
|
||||
{
|
||||
"id": "llm2-source-code1-target",
|
||||
"source": "llm2",
|
||||
"target": "code1",
|
||||
},
|
||||
{
|
||||
"id": "llm3-source-code2-target",
|
||||
"source": "llm3",
|
||||
"target": "code2",
|
||||
},
|
||||
{
|
||||
"id": "llm4-source-code2-target",
|
||||
"source": "llm4",
|
||||
"target": "code2",
|
||||
},
|
||||
{
|
||||
"id": "llm5-source-code3-target",
|
||||
"source": "llm5",
|
||||
"target": "code3",
|
||||
},
|
||||
{
|
||||
"id": "code1-source-answer-target",
|
||||
"source": "code1",
|
||||
"target": "answer",
|
||||
},
|
||||
{
|
||||
"id": "code2-source-answer-target",
|
||||
"source": "code2",
|
||||
"target": "answer",
|
||||
},
|
||||
],
|
||||
"nodes": [
|
||||
{"data": {"type": "start"}, "id": "start"},
|
||||
{
|
||||
"data": {
|
||||
"type": "llm",
|
||||
},
|
||||
"id": "llm1",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"type": "code",
|
||||
},
|
||||
"id": "code1",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"type": "llm",
|
||||
},
|
||||
"id": "llm2",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"type": "code",
|
||||
},
|
||||
"id": "code2",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"type": "llm",
|
||||
},
|
||||
"id": "llm3",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"type": "code",
|
||||
},
|
||||
"id": "code3",
|
||||
},
|
||||
{
|
||||
"data": {"type": "answer", "title": "answer", "answer": "1"},
|
||||
"id": "answer",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"type": "llm",
|
||||
},
|
||||
"id": "llm4",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"type": "llm",
|
||||
},
|
||||
"id": "llm5",
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
graph = Graph.init(graph_config=graph_config)
|
||||
|
||||
assert graph.root_node_id == "start"
|
||||
for i in range(5):
|
||||
assert graph.edge_mapping.get("start")[i].target_node_id == f"llm{i + 1}"
|
||||
|
||||
assert graph.edge_mapping.get("llm1") is not None
|
||||
assert graph.edge_mapping.get("llm1")[0].target_node_id == "code1"
|
||||
assert graph.edge_mapping.get("llm2") is not None
|
||||
assert graph.edge_mapping.get("llm2")[0].target_node_id == "code1"
|
||||
assert graph.edge_mapping.get("llm3") is not None
|
||||
assert graph.edge_mapping.get("llm3")[0].target_node_id == "code2"
|
||||
assert graph.edge_mapping.get("llm4") is not None
|
||||
assert graph.edge_mapping.get("llm4")[0].target_node_id == "code2"
|
||||
assert graph.edge_mapping.get("llm5") is not None
|
||||
assert graph.edge_mapping.get("llm5")[0].target_node_id == "code3"
|
||||
assert graph.edge_mapping.get("code1") is not None
|
||||
assert graph.edge_mapping.get("code1")[0].target_node_id == "answer"
|
||||
assert graph.edge_mapping.get("code2") is not None
|
||||
assert graph.edge_mapping.get("code2")[0].target_node_id == "answer"
|
||||
|
||||
assert len(graph.parallel_mapping) == 1
|
||||
assert len(graph.node_parallel_mapping) == 8
|
||||
|
||||
for node_id in ["llm1", "llm2", "llm3", "llm4", "llm5", "code1", "code2", "code3"]:
|
||||
assert node_id in graph.node_parallel_mapping
|
||||
|
||||
|
||||
def test_parallels_graph6():
|
||||
graph_config = {
|
||||
"edges": [
|
||||
{
|
||||
"id": "start-source-llm1-target",
|
||||
"source": "start",
|
||||
"target": "llm1",
|
||||
},
|
||||
{
|
||||
"id": "start-source-llm2-target",
|
||||
"source": "start",
|
||||
"target": "llm2",
|
||||
},
|
||||
{
|
||||
"id": "start-source-llm3-target",
|
||||
"source": "start",
|
||||
"target": "llm3",
|
||||
},
|
||||
{
|
||||
"id": "llm1-source-code1-target",
|
||||
"source": "llm1",
|
||||
"target": "code1",
|
||||
},
|
||||
{
|
||||
"id": "llm1-source-code2-target",
|
||||
"source": "llm1",
|
||||
"target": "code2",
|
||||
},
|
||||
{
|
||||
"id": "llm2-source-code3-target",
|
||||
"source": "llm2",
|
||||
"target": "code3",
|
||||
},
|
||||
{
|
||||
"id": "code1-source-answer-target",
|
||||
"source": "code1",
|
||||
"target": "answer",
|
||||
},
|
||||
{
|
||||
"id": "code2-source-answer-target",
|
||||
"source": "code2",
|
||||
"target": "answer",
|
||||
},
|
||||
{
|
||||
"id": "code3-source-answer-target",
|
||||
"source": "code3",
|
||||
"target": "answer",
|
||||
},
|
||||
{
|
||||
"id": "llm3-source-answer-target",
|
||||
"source": "llm3",
|
||||
"target": "answer",
|
||||
},
|
||||
],
|
||||
"nodes": [
|
||||
{"data": {"type": "start"}, "id": "start"},
|
||||
{
|
||||
"data": {
|
||||
"type": "llm",
|
||||
},
|
||||
"id": "llm1",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"type": "code",
|
||||
},
|
||||
"id": "code1",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"type": "llm",
|
||||
},
|
||||
"id": "llm2",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"type": "code",
|
||||
},
|
||||
"id": "code2",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"type": "llm",
|
||||
},
|
||||
"id": "llm3",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"type": "code",
|
||||
},
|
||||
"id": "code3",
|
||||
},
|
||||
{
|
||||
"data": {"type": "answer", "title": "answer", "answer": "1"},
|
||||
"id": "answer",
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
graph = Graph.init(graph_config=graph_config)
|
||||
|
||||
assert graph.root_node_id == "start"
|
||||
for i in range(3):
|
||||
assert graph.edge_mapping.get("start")[i].target_node_id == f"llm{i + 1}"
|
||||
|
||||
assert graph.edge_mapping.get("llm1") is not None
|
||||
assert graph.edge_mapping.get("llm1")[0].target_node_id == "code1"
|
||||
assert graph.edge_mapping.get("llm1") is not None
|
||||
assert graph.edge_mapping.get("llm1")[1].target_node_id == "code2"
|
||||
assert graph.edge_mapping.get("llm2") is not None
|
||||
assert graph.edge_mapping.get("llm2")[0].target_node_id == "code3"
|
||||
assert graph.edge_mapping.get("code1") is not None
|
||||
assert graph.edge_mapping.get("code1")[0].target_node_id == "answer"
|
||||
assert graph.edge_mapping.get("code2") is not None
|
||||
assert graph.edge_mapping.get("code2")[0].target_node_id == "answer"
|
||||
assert graph.edge_mapping.get("code3") is not None
|
||||
assert graph.edge_mapping.get("code3")[0].target_node_id == "answer"
|
||||
|
||||
assert len(graph.parallel_mapping) == 2
|
||||
assert len(graph.node_parallel_mapping) == 6
|
||||
|
||||
for node_id in ["llm1", "llm2", "llm3", "code1", "code2", "code3"]:
|
||||
assert node_id in graph.node_parallel_mapping
|
||||
|
||||
parent_parallel = None
|
||||
child_parallel = None
|
||||
for p_id, parallel in graph.parallel_mapping.items():
|
||||
if parallel.parent_parallel_id is None:
|
||||
parent_parallel = parallel
|
||||
else:
|
||||
child_parallel = parallel
|
||||
|
||||
for node_id in ["llm1", "llm2", "llm3", "code3"]:
|
||||
assert graph.node_parallel_mapping[node_id] == parent_parallel.id
|
||||
|
||||
for node_id in ["code1", "code2"]:
|
||||
assert graph.node_parallel_mapping[node_id] == child_parallel.id
|
||||
@@ -0,0 +1,505 @@
|
||||
from unittest.mock import patch
|
||||
|
||||
from core.app.entities.app_invoke_entities import InvokeFrom
|
||||
from core.workflow.entities.node_entities import NodeRunMetadataKey, NodeRunResult, UserFrom
|
||||
from core.workflow.entities.variable_pool import VariablePool
|
||||
from core.workflow.enums import SystemVariableKey
|
||||
from core.workflow.graph_engine.entities.event import (
|
||||
BaseNodeEvent,
|
||||
GraphRunFailedEvent,
|
||||
GraphRunStartedEvent,
|
||||
GraphRunSucceededEvent,
|
||||
NodeRunFailedEvent,
|
||||
NodeRunStartedEvent,
|
||||
NodeRunStreamChunkEvent,
|
||||
NodeRunSucceededEvent,
|
||||
)
|
||||
from core.workflow.graph_engine.entities.graph import Graph
|
||||
from core.workflow.graph_engine.entities.runtime_route_state import RouteNodeState
|
||||
from core.workflow.graph_engine.graph_engine import GraphEngine
|
||||
from core.workflow.nodes.event import RunCompletedEvent, RunStreamChunkEvent
|
||||
from core.workflow.nodes.llm.llm_node import LLMNode
|
||||
from models.workflow import WorkflowNodeExecutionStatus, WorkflowType
|
||||
|
||||
|
||||
@patch("extensions.ext_database.db.session.remove")
|
||||
@patch("extensions.ext_database.db.session.close")
|
||||
def test_run_parallel_in_workflow(mock_close, mock_remove):
|
||||
graph_config = {
|
||||
"edges": [
|
||||
{
|
||||
"id": "1",
|
||||
"source": "start",
|
||||
"target": "llm1",
|
||||
},
|
||||
{
|
||||
"id": "2",
|
||||
"source": "llm1",
|
||||
"target": "llm2",
|
||||
},
|
||||
{
|
||||
"id": "3",
|
||||
"source": "llm1",
|
||||
"target": "llm3",
|
||||
},
|
||||
{
|
||||
"id": "4",
|
||||
"source": "llm2",
|
||||
"target": "end1",
|
||||
},
|
||||
{
|
||||
"id": "5",
|
||||
"source": "llm3",
|
||||
"target": "end2",
|
||||
},
|
||||
],
|
||||
"nodes": [
|
||||
{
|
||||
"data": {
|
||||
"type": "start",
|
||||
"title": "start",
|
||||
"variables": [
|
||||
{
|
||||
"label": "query",
|
||||
"max_length": 48,
|
||||
"options": [],
|
||||
"required": True,
|
||||
"type": "text-input",
|
||||
"variable": "query",
|
||||
}
|
||||
],
|
||||
},
|
||||
"id": "start",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"type": "llm",
|
||||
"title": "llm1",
|
||||
"context": {"enabled": False, "variable_selector": []},
|
||||
"model": {
|
||||
"completion_params": {"temperature": 0.7},
|
||||
"mode": "chat",
|
||||
"name": "gpt-4o",
|
||||
"provider": "openai",
|
||||
},
|
||||
"prompt_template": [
|
||||
{"role": "system", "text": "say hi"},
|
||||
{"role": "user", "text": "{{#start.query#}}"},
|
||||
],
|
||||
"vision": {"configs": {"detail": "high"}, "enabled": False},
|
||||
},
|
||||
"id": "llm1",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"type": "llm",
|
||||
"title": "llm2",
|
||||
"context": {"enabled": False, "variable_selector": []},
|
||||
"model": {
|
||||
"completion_params": {"temperature": 0.7},
|
||||
"mode": "chat",
|
||||
"name": "gpt-4o",
|
||||
"provider": "openai",
|
||||
},
|
||||
"prompt_template": [
|
||||
{"role": "system", "text": "say bye"},
|
||||
{"role": "user", "text": "{{#start.query#}}"},
|
||||
],
|
||||
"vision": {"configs": {"detail": "high"}, "enabled": False},
|
||||
},
|
||||
"id": "llm2",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"type": "llm",
|
||||
"title": "llm3",
|
||||
"context": {"enabled": False, "variable_selector": []},
|
||||
"model": {
|
||||
"completion_params": {"temperature": 0.7},
|
||||
"mode": "chat",
|
||||
"name": "gpt-4o",
|
||||
"provider": "openai",
|
||||
},
|
||||
"prompt_template": [
|
||||
{"role": "system", "text": "say good morning"},
|
||||
{"role": "user", "text": "{{#start.query#}}"},
|
||||
],
|
||||
"vision": {"configs": {"detail": "high"}, "enabled": False},
|
||||
},
|
||||
"id": "llm3",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"type": "end",
|
||||
"title": "end1",
|
||||
"outputs": [
|
||||
{"value_selector": ["llm2", "text"], "variable": "result2"},
|
||||
{"value_selector": ["start", "query"], "variable": "query"},
|
||||
],
|
||||
},
|
||||
"id": "end1",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"type": "end",
|
||||
"title": "end2",
|
||||
"outputs": [
|
||||
{"value_selector": ["llm1", "text"], "variable": "result1"},
|
||||
{"value_selector": ["llm3", "text"], "variable": "result3"},
|
||||
],
|
||||
},
|
||||
"id": "end2",
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
graph = Graph.init(graph_config=graph_config)
|
||||
|
||||
variable_pool = VariablePool(
|
||||
system_variables={SystemVariableKey.FILES: [], SystemVariableKey.USER_ID: "aaa"}, user_inputs={"query": "hi"}
|
||||
)
|
||||
|
||||
graph_engine = GraphEngine(
|
||||
tenant_id="111",
|
||||
app_id="222",
|
||||
workflow_type=WorkflowType.WORKFLOW,
|
||||
workflow_id="333",
|
||||
graph_config=graph_config,
|
||||
user_id="444",
|
||||
user_from=UserFrom.ACCOUNT,
|
||||
invoke_from=InvokeFrom.WEB_APP,
|
||||
call_depth=0,
|
||||
graph=graph,
|
||||
variable_pool=variable_pool,
|
||||
max_execution_steps=500,
|
||||
max_execution_time=1200,
|
||||
)
|
||||
|
||||
def llm_generator(self):
|
||||
contents = ["hi", "bye", "good morning"]
|
||||
|
||||
yield RunStreamChunkEvent(
|
||||
chunk_content=contents[int(self.node_id[-1]) - 1], from_variable_selector=[self.node_id, "text"]
|
||||
)
|
||||
|
||||
yield RunCompletedEvent(
|
||||
run_result=NodeRunResult(
|
||||
status=WorkflowNodeExecutionStatus.SUCCEEDED,
|
||||
inputs={},
|
||||
process_data={},
|
||||
outputs={},
|
||||
metadata={
|
||||
NodeRunMetadataKey.TOTAL_TOKENS: 1,
|
||||
NodeRunMetadataKey.TOTAL_PRICE: 1,
|
||||
NodeRunMetadataKey.CURRENCY: "USD",
|
||||
},
|
||||
)
|
||||
)
|
||||
|
||||
# print("")
|
||||
|
||||
with patch.object(LLMNode, "_run", new=llm_generator):
|
||||
items = []
|
||||
generator = graph_engine.run()
|
||||
for item in generator:
|
||||
# print(type(item), item)
|
||||
items.append(item)
|
||||
if isinstance(item, NodeRunSucceededEvent):
|
||||
assert item.route_node_state.status == RouteNodeState.Status.SUCCESS
|
||||
|
||||
assert not isinstance(item, NodeRunFailedEvent)
|
||||
assert not isinstance(item, GraphRunFailedEvent)
|
||||
|
||||
if isinstance(item, BaseNodeEvent) and item.route_node_state.node_id in ["llm2", "llm3", "end1", "end2"]:
|
||||
assert item.parallel_id is not None
|
||||
|
||||
assert len(items) == 18
|
||||
assert isinstance(items[0], GraphRunStartedEvent)
|
||||
assert isinstance(items[1], NodeRunStartedEvent)
|
||||
assert items[1].route_node_state.node_id == "start"
|
||||
assert isinstance(items[2], NodeRunSucceededEvent)
|
||||
assert items[2].route_node_state.node_id == "start"
|
||||
|
||||
|
||||
@patch("extensions.ext_database.db.session.remove")
|
||||
@patch("extensions.ext_database.db.session.close")
|
||||
def test_run_parallel_in_chatflow(mock_close, mock_remove):
|
||||
graph_config = {
|
||||
"edges": [
|
||||
{
|
||||
"id": "1",
|
||||
"source": "start",
|
||||
"target": "answer1",
|
||||
},
|
||||
{
|
||||
"id": "2",
|
||||
"source": "answer1",
|
||||
"target": "answer2",
|
||||
},
|
||||
{
|
||||
"id": "3",
|
||||
"source": "answer1",
|
||||
"target": "answer3",
|
||||
},
|
||||
{
|
||||
"id": "4",
|
||||
"source": "answer2",
|
||||
"target": "answer4",
|
||||
},
|
||||
{
|
||||
"id": "5",
|
||||
"source": "answer3",
|
||||
"target": "answer5",
|
||||
},
|
||||
],
|
||||
"nodes": [
|
||||
{"data": {"type": "start", "title": "start"}, "id": "start"},
|
||||
{"data": {"type": "answer", "title": "answer1", "answer": "1"}, "id": "answer1"},
|
||||
{
|
||||
"data": {"type": "answer", "title": "answer2", "answer": "2"},
|
||||
"id": "answer2",
|
||||
},
|
||||
{
|
||||
"data": {"type": "answer", "title": "answer3", "answer": "3"},
|
||||
"id": "answer3",
|
||||
},
|
||||
{
|
||||
"data": {"type": "answer", "title": "answer4", "answer": "4"},
|
||||
"id": "answer4",
|
||||
},
|
||||
{
|
||||
"data": {"type": "answer", "title": "answer5", "answer": "5"},
|
||||
"id": "answer5",
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
graph = Graph.init(graph_config=graph_config)
|
||||
|
||||
variable_pool = VariablePool(
|
||||
system_variables={
|
||||
SystemVariableKey.QUERY: "what's the weather in SF",
|
||||
SystemVariableKey.FILES: [],
|
||||
SystemVariableKey.CONVERSATION_ID: "abababa",
|
||||
SystemVariableKey.USER_ID: "aaa",
|
||||
},
|
||||
user_inputs={},
|
||||
)
|
||||
|
||||
graph_engine = GraphEngine(
|
||||
tenant_id="111",
|
||||
app_id="222",
|
||||
workflow_type=WorkflowType.CHAT,
|
||||
workflow_id="333",
|
||||
graph_config=graph_config,
|
||||
user_id="444",
|
||||
user_from=UserFrom.ACCOUNT,
|
||||
invoke_from=InvokeFrom.WEB_APP,
|
||||
call_depth=0,
|
||||
graph=graph,
|
||||
variable_pool=variable_pool,
|
||||
max_execution_steps=500,
|
||||
max_execution_time=1200,
|
||||
)
|
||||
|
||||
# print("")
|
||||
|
||||
items = []
|
||||
generator = graph_engine.run()
|
||||
for item in generator:
|
||||
# print(type(item), item)
|
||||
items.append(item)
|
||||
if isinstance(item, NodeRunSucceededEvent):
|
||||
assert item.route_node_state.status == RouteNodeState.Status.SUCCESS
|
||||
|
||||
assert not isinstance(item, NodeRunFailedEvent)
|
||||
assert not isinstance(item, GraphRunFailedEvent)
|
||||
|
||||
if isinstance(item, BaseNodeEvent) and item.route_node_state.node_id in [
|
||||
"answer2",
|
||||
"answer3",
|
||||
"answer4",
|
||||
"answer5",
|
||||
]:
|
||||
assert item.parallel_id is not None
|
||||
|
||||
assert len(items) == 23
|
||||
assert isinstance(items[0], GraphRunStartedEvent)
|
||||
assert isinstance(items[1], NodeRunStartedEvent)
|
||||
assert items[1].route_node_state.node_id == "start"
|
||||
assert isinstance(items[2], NodeRunSucceededEvent)
|
||||
assert items[2].route_node_state.node_id == "start"
|
||||
|
||||
|
||||
@patch("extensions.ext_database.db.session.remove")
|
||||
@patch("extensions.ext_database.db.session.close")
|
||||
def test_run_branch(mock_close, mock_remove):
|
||||
graph_config = {
|
||||
"edges": [
|
||||
{
|
||||
"id": "1",
|
||||
"source": "start",
|
||||
"target": "if-else-1",
|
||||
},
|
||||
{
|
||||
"id": "2",
|
||||
"source": "if-else-1",
|
||||
"sourceHandle": "true",
|
||||
"target": "answer-1",
|
||||
},
|
||||
{
|
||||
"id": "3",
|
||||
"source": "if-else-1",
|
||||
"sourceHandle": "false",
|
||||
"target": "if-else-2",
|
||||
},
|
||||
{
|
||||
"id": "4",
|
||||
"source": "if-else-2",
|
||||
"sourceHandle": "true",
|
||||
"target": "answer-2",
|
||||
},
|
||||
{
|
||||
"id": "5",
|
||||
"source": "if-else-2",
|
||||
"sourceHandle": "false",
|
||||
"target": "answer-3",
|
||||
},
|
||||
],
|
||||
"nodes": [
|
||||
{
|
||||
"data": {
|
||||
"title": "Start",
|
||||
"type": "start",
|
||||
"variables": [
|
||||
{
|
||||
"label": "uid",
|
||||
"max_length": 48,
|
||||
"options": [],
|
||||
"required": True,
|
||||
"type": "text-input",
|
||||
"variable": "uid",
|
||||
}
|
||||
],
|
||||
},
|
||||
"id": "start",
|
||||
},
|
||||
{
|
||||
"data": {"answer": "1 {{#start.uid#}}", "title": "Answer", "type": "answer", "variables": []},
|
||||
"id": "answer-1",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"cases": [
|
||||
{
|
||||
"case_id": "true",
|
||||
"conditions": [
|
||||
{
|
||||
"comparison_operator": "contains",
|
||||
"id": "b0f02473-08b6-4a81-af91-15345dcb2ec8",
|
||||
"value": "hi",
|
||||
"varType": "string",
|
||||
"variable_selector": ["sys", "query"],
|
||||
}
|
||||
],
|
||||
"id": "true",
|
||||
"logical_operator": "and",
|
||||
}
|
||||
],
|
||||
"desc": "",
|
||||
"title": "IF/ELSE",
|
||||
"type": "if-else",
|
||||
},
|
||||
"id": "if-else-1",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"cases": [
|
||||
{
|
||||
"case_id": "true",
|
||||
"conditions": [
|
||||
{
|
||||
"comparison_operator": "contains",
|
||||
"id": "ae895199-5608-433b-b5f0-0997ae1431e4",
|
||||
"value": "takatost",
|
||||
"varType": "string",
|
||||
"variable_selector": ["sys", "query"],
|
||||
}
|
||||
],
|
||||
"id": "true",
|
||||
"logical_operator": "and",
|
||||
}
|
||||
],
|
||||
"title": "IF/ELSE 2",
|
||||
"type": "if-else",
|
||||
},
|
||||
"id": "if-else-2",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"answer": "2",
|
||||
"title": "Answer 2",
|
||||
"type": "answer",
|
||||
},
|
||||
"id": "answer-2",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"answer": "3",
|
||||
"title": "Answer 3",
|
||||
"type": "answer",
|
||||
},
|
||||
"id": "answer-3",
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
graph = Graph.init(graph_config=graph_config)
|
||||
|
||||
variable_pool = VariablePool(
|
||||
system_variables={
|
||||
SystemVariableKey.QUERY: "hi",
|
||||
SystemVariableKey.FILES: [],
|
||||
SystemVariableKey.CONVERSATION_ID: "abababa",
|
||||
SystemVariableKey.USER_ID: "aaa",
|
||||
},
|
||||
user_inputs={"uid": "takato"},
|
||||
)
|
||||
|
||||
graph_engine = GraphEngine(
|
||||
tenant_id="111",
|
||||
app_id="222",
|
||||
workflow_type=WorkflowType.CHAT,
|
||||
workflow_id="333",
|
||||
graph_config=graph_config,
|
||||
user_id="444",
|
||||
user_from=UserFrom.ACCOUNT,
|
||||
invoke_from=InvokeFrom.WEB_APP,
|
||||
call_depth=0,
|
||||
graph=graph,
|
||||
variable_pool=variable_pool,
|
||||
max_execution_steps=500,
|
||||
max_execution_time=1200,
|
||||
)
|
||||
|
||||
# print("")
|
||||
|
||||
items = []
|
||||
generator = graph_engine.run()
|
||||
for item in generator:
|
||||
# print(type(item), item)
|
||||
items.append(item)
|
||||
|
||||
assert len(items) == 10
|
||||
assert items[3].route_node_state.node_id == "if-else-1"
|
||||
assert items[4].route_node_state.node_id == "if-else-1"
|
||||
assert isinstance(items[5], NodeRunStreamChunkEvent)
|
||||
assert items[5].chunk_content == "1 "
|
||||
assert isinstance(items[6], NodeRunStreamChunkEvent)
|
||||
assert items[6].chunk_content == "takato"
|
||||
assert items[7].route_node_state.node_id == "answer-1"
|
||||
assert items[8].route_node_state.node_id == "answer-1"
|
||||
assert items[8].route_node_state.node_run_result.outputs["answer"] == "1 takato"
|
||||
assert isinstance(items[9], GraphRunSucceededEvent)
|
||||
|
||||
# print(graph_engine.graph_runtime_state.model_dump_json(indent=2))
|
||||
@@ -0,0 +1,82 @@
|
||||
import time
|
||||
import uuid
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
from core.app.entities.app_invoke_entities import InvokeFrom
|
||||
from core.workflow.entities.node_entities import UserFrom
|
||||
from core.workflow.entities.variable_pool import VariablePool
|
||||
from core.workflow.enums import SystemVariableKey
|
||||
from core.workflow.graph_engine.entities.graph import Graph
|
||||
from core.workflow.graph_engine.entities.graph_init_params import GraphInitParams
|
||||
from core.workflow.graph_engine.entities.graph_runtime_state import GraphRuntimeState
|
||||
from core.workflow.nodes.answer.answer_node import AnswerNode
|
||||
from extensions.ext_database import db
|
||||
from models.workflow import WorkflowNodeExecutionStatus, WorkflowType
|
||||
|
||||
|
||||
def test_execute_answer():
|
||||
graph_config = {
|
||||
"edges": [
|
||||
{
|
||||
"id": "start-source-llm-target",
|
||||
"source": "start",
|
||||
"target": "llm",
|
||||
},
|
||||
],
|
||||
"nodes": [
|
||||
{"data": {"type": "start"}, "id": "start"},
|
||||
{
|
||||
"data": {
|
||||
"type": "llm",
|
||||
},
|
||||
"id": "llm",
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
graph = Graph.init(graph_config=graph_config)
|
||||
|
||||
init_params = GraphInitParams(
|
||||
tenant_id="1",
|
||||
app_id="1",
|
||||
workflow_type=WorkflowType.WORKFLOW,
|
||||
workflow_id="1",
|
||||
graph_config=graph_config,
|
||||
user_id="1",
|
||||
user_from=UserFrom.ACCOUNT,
|
||||
invoke_from=InvokeFrom.DEBUGGER,
|
||||
call_depth=0,
|
||||
)
|
||||
|
||||
# construct variable pool
|
||||
pool = VariablePool(
|
||||
system_variables={SystemVariableKey.FILES: [], SystemVariableKey.USER_ID: "aaa"},
|
||||
user_inputs={},
|
||||
environment_variables=[],
|
||||
)
|
||||
pool.add(["start", "weather"], "sunny")
|
||||
pool.add(["llm", "text"], "You are a helpful AI.")
|
||||
|
||||
node = AnswerNode(
|
||||
id=str(uuid.uuid4()),
|
||||
graph_init_params=init_params,
|
||||
graph=graph,
|
||||
graph_runtime_state=GraphRuntimeState(variable_pool=pool, start_at=time.perf_counter()),
|
||||
config={
|
||||
"id": "answer",
|
||||
"data": {
|
||||
"title": "123",
|
||||
"type": "answer",
|
||||
"answer": "Today's weather is {{#start.weather#}}\n{{#llm.text#}}\n{{img}}\nFin.",
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
# Mock db.session.close()
|
||||
db.session.close = MagicMock()
|
||||
|
||||
# execute node
|
||||
result = node._run()
|
||||
|
||||
assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
|
||||
assert result.outputs["answer"] == "Today's weather is sunny\nYou are a helpful AI.\n{{img}}\nFin."
|
||||
@@ -0,0 +1,109 @@
|
||||
from core.workflow.graph_engine.entities.graph import Graph
|
||||
from core.workflow.nodes.answer.answer_stream_generate_router import AnswerStreamGeneratorRouter
|
||||
|
||||
|
||||
def test_init():
|
||||
graph_config = {
|
||||
"edges": [
|
||||
{
|
||||
"id": "start-source-llm1-target",
|
||||
"source": "start",
|
||||
"target": "llm1",
|
||||
},
|
||||
{
|
||||
"id": "start-source-llm2-target",
|
||||
"source": "start",
|
||||
"target": "llm2",
|
||||
},
|
||||
{
|
||||
"id": "start-source-llm3-target",
|
||||
"source": "start",
|
||||
"target": "llm3",
|
||||
},
|
||||
{
|
||||
"id": "llm3-source-llm4-target",
|
||||
"source": "llm3",
|
||||
"target": "llm4",
|
||||
},
|
||||
{
|
||||
"id": "llm3-source-llm5-target",
|
||||
"source": "llm3",
|
||||
"target": "llm5",
|
||||
},
|
||||
{
|
||||
"id": "llm4-source-answer2-target",
|
||||
"source": "llm4",
|
||||
"target": "answer2",
|
||||
},
|
||||
{
|
||||
"id": "llm5-source-answer-target",
|
||||
"source": "llm5",
|
||||
"target": "answer",
|
||||
},
|
||||
{
|
||||
"id": "answer2-source-answer-target",
|
||||
"source": "answer2",
|
||||
"target": "answer",
|
||||
},
|
||||
{
|
||||
"id": "llm2-source-answer-target",
|
||||
"source": "llm2",
|
||||
"target": "answer",
|
||||
},
|
||||
{
|
||||
"id": "llm1-source-answer-target",
|
||||
"source": "llm1",
|
||||
"target": "answer",
|
||||
},
|
||||
],
|
||||
"nodes": [
|
||||
{"data": {"type": "start"}, "id": "start"},
|
||||
{
|
||||
"data": {
|
||||
"type": "llm",
|
||||
},
|
||||
"id": "llm1",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"type": "llm",
|
||||
},
|
||||
"id": "llm2",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"type": "llm",
|
||||
},
|
||||
"id": "llm3",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"type": "llm",
|
||||
},
|
||||
"id": "llm4",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"type": "llm",
|
||||
},
|
||||
"id": "llm5",
|
||||
},
|
||||
{
|
||||
"data": {"type": "answer", "title": "answer", "answer": "1{{#llm2.text#}}2"},
|
||||
"id": "answer",
|
||||
},
|
||||
{
|
||||
"data": {"type": "answer", "title": "answer2", "answer": "1{{#llm3.text#}}2"},
|
||||
"id": "answer2",
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
graph = Graph.init(graph_config=graph_config)
|
||||
|
||||
answer_stream_generate_route = AnswerStreamGeneratorRouter.init(
|
||||
node_id_config_mapping=graph.node_id_config_mapping, reverse_edge_mapping=graph.reverse_edge_mapping
|
||||
)
|
||||
|
||||
assert answer_stream_generate_route.answer_dependencies["answer"] == ["answer2"]
|
||||
assert answer_stream_generate_route.answer_dependencies["answer2"] == []
|
||||
@@ -0,0 +1,216 @@
|
||||
import uuid
|
||||
from collections.abc import Generator
|
||||
from datetime import datetime, timezone
|
||||
|
||||
from core.workflow.entities.node_entities import NodeType
|
||||
from core.workflow.entities.variable_pool import VariablePool
|
||||
from core.workflow.enums import SystemVariableKey
|
||||
from core.workflow.graph_engine.entities.event import (
|
||||
GraphEngineEvent,
|
||||
NodeRunStartedEvent,
|
||||
NodeRunStreamChunkEvent,
|
||||
NodeRunSucceededEvent,
|
||||
)
|
||||
from core.workflow.graph_engine.entities.graph import Graph
|
||||
from core.workflow.graph_engine.entities.runtime_route_state import RouteNodeState
|
||||
from core.workflow.nodes.answer.answer_stream_processor import AnswerStreamProcessor
|
||||
from core.workflow.nodes.start.entities import StartNodeData
|
||||
|
||||
|
||||
def _recursive_process(graph: Graph, next_node_id: str) -> Generator[GraphEngineEvent, None, None]:
|
||||
if next_node_id == "start":
|
||||
yield from _publish_events(graph, next_node_id)
|
||||
|
||||
for edge in graph.edge_mapping.get(next_node_id, []):
|
||||
yield from _publish_events(graph, edge.target_node_id)
|
||||
|
||||
for edge in graph.edge_mapping.get(next_node_id, []):
|
||||
yield from _recursive_process(graph, edge.target_node_id)
|
||||
|
||||
|
||||
def _publish_events(graph: Graph, next_node_id: str) -> Generator[GraphEngineEvent, None, None]:
|
||||
route_node_state = RouteNodeState(node_id=next_node_id, start_at=datetime.now(timezone.utc).replace(tzinfo=None))
|
||||
|
||||
parallel_id = graph.node_parallel_mapping.get(next_node_id)
|
||||
parallel_start_node_id = None
|
||||
if parallel_id:
|
||||
parallel = graph.parallel_mapping.get(parallel_id)
|
||||
parallel_start_node_id = parallel.start_from_node_id if parallel else None
|
||||
|
||||
node_execution_id = str(uuid.uuid4())
|
||||
node_config = graph.node_id_config_mapping[next_node_id]
|
||||
node_type = NodeType.value_of(node_config.get("data", {}).get("type"))
|
||||
mock_node_data = StartNodeData(**{"title": "demo", "variables": []})
|
||||
|
||||
yield NodeRunStartedEvent(
|
||||
id=node_execution_id,
|
||||
node_id=next_node_id,
|
||||
node_type=node_type,
|
||||
node_data=mock_node_data,
|
||||
route_node_state=route_node_state,
|
||||
parallel_id=graph.node_parallel_mapping.get(next_node_id),
|
||||
parallel_start_node_id=parallel_start_node_id,
|
||||
)
|
||||
|
||||
if "llm" in next_node_id:
|
||||
length = int(next_node_id[-1])
|
||||
for i in range(0, length):
|
||||
yield NodeRunStreamChunkEvent(
|
||||
id=node_execution_id,
|
||||
node_id=next_node_id,
|
||||
node_type=node_type,
|
||||
node_data=mock_node_data,
|
||||
chunk_content=str(i),
|
||||
route_node_state=route_node_state,
|
||||
from_variable_selector=[next_node_id, "text"],
|
||||
parallel_id=parallel_id,
|
||||
parallel_start_node_id=parallel_start_node_id,
|
||||
)
|
||||
|
||||
route_node_state.status = RouteNodeState.Status.SUCCESS
|
||||
route_node_state.finished_at = datetime.now(timezone.utc).replace(tzinfo=None)
|
||||
yield NodeRunSucceededEvent(
|
||||
id=node_execution_id,
|
||||
node_id=next_node_id,
|
||||
node_type=node_type,
|
||||
node_data=mock_node_data,
|
||||
route_node_state=route_node_state,
|
||||
parallel_id=parallel_id,
|
||||
parallel_start_node_id=parallel_start_node_id,
|
||||
)
|
||||
|
||||
|
||||
def test_process():
|
||||
graph_config = {
|
||||
"edges": [
|
||||
{
|
||||
"id": "start-source-llm1-target",
|
||||
"source": "start",
|
||||
"target": "llm1",
|
||||
},
|
||||
{
|
||||
"id": "start-source-llm2-target",
|
||||
"source": "start",
|
||||
"target": "llm2",
|
||||
},
|
||||
{
|
||||
"id": "start-source-llm3-target",
|
||||
"source": "start",
|
||||
"target": "llm3",
|
||||
},
|
||||
{
|
||||
"id": "llm3-source-llm4-target",
|
||||
"source": "llm3",
|
||||
"target": "llm4",
|
||||
},
|
||||
{
|
||||
"id": "llm3-source-llm5-target",
|
||||
"source": "llm3",
|
||||
"target": "llm5",
|
||||
},
|
||||
{
|
||||
"id": "llm4-source-answer2-target",
|
||||
"source": "llm4",
|
||||
"target": "answer2",
|
||||
},
|
||||
{
|
||||
"id": "llm5-source-answer-target",
|
||||
"source": "llm5",
|
||||
"target": "answer",
|
||||
},
|
||||
{
|
||||
"id": "answer2-source-answer-target",
|
||||
"source": "answer2",
|
||||
"target": "answer",
|
||||
},
|
||||
{
|
||||
"id": "llm2-source-answer-target",
|
||||
"source": "llm2",
|
||||
"target": "answer",
|
||||
},
|
||||
{
|
||||
"id": "llm1-source-answer-target",
|
||||
"source": "llm1",
|
||||
"target": "answer",
|
||||
},
|
||||
],
|
||||
"nodes": [
|
||||
{"data": {"type": "start"}, "id": "start"},
|
||||
{
|
||||
"data": {
|
||||
"type": "llm",
|
||||
},
|
||||
"id": "llm1",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"type": "llm",
|
||||
},
|
||||
"id": "llm2",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"type": "llm",
|
||||
},
|
||||
"id": "llm3",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"type": "llm",
|
||||
},
|
||||
"id": "llm4",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"type": "llm",
|
||||
},
|
||||
"id": "llm5",
|
||||
},
|
||||
{
|
||||
"data": {"type": "answer", "title": "answer", "answer": "a{{#llm2.text#}}b"},
|
||||
"id": "answer",
|
||||
},
|
||||
{
|
||||
"data": {"type": "answer", "title": "answer2", "answer": "c{{#llm3.text#}}d"},
|
||||
"id": "answer2",
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
graph = Graph.init(graph_config=graph_config)
|
||||
|
||||
variable_pool = VariablePool(
|
||||
system_variables={
|
||||
SystemVariableKey.QUERY: "what's the weather in SF",
|
||||
SystemVariableKey.FILES: [],
|
||||
SystemVariableKey.CONVERSATION_ID: "abababa",
|
||||
SystemVariableKey.USER_ID: "aaa",
|
||||
},
|
||||
user_inputs={},
|
||||
)
|
||||
|
||||
answer_stream_processor = AnswerStreamProcessor(graph=graph, variable_pool=variable_pool)
|
||||
|
||||
def graph_generator() -> Generator[GraphEngineEvent, None, None]:
|
||||
# print("")
|
||||
for event in _recursive_process(graph, "start"):
|
||||
# print("[ORIGIN]", event.__class__.__name__ + ":", event.route_node_state.node_id,
|
||||
# " " + (event.chunk_content if isinstance(event, NodeRunStreamChunkEvent) else ""))
|
||||
if isinstance(event, NodeRunSucceededEvent):
|
||||
if "llm" in event.route_node_state.node_id:
|
||||
variable_pool.add(
|
||||
[event.route_node_state.node_id, "text"],
|
||||
"".join(str(i) for i in range(0, int(event.route_node_state.node_id[-1]))),
|
||||
)
|
||||
yield event
|
||||
|
||||
result_generator = answer_stream_processor.process(graph_generator())
|
||||
stream_contents = ""
|
||||
for event in result_generator:
|
||||
# print("[ANSWER]", event.__class__.__name__ + ":", event.route_node_state.node_id,
|
||||
# " " + (event.chunk_content if isinstance(event, NodeRunStreamChunkEvent) else ""))
|
||||
if isinstance(event, NodeRunStreamChunkEvent):
|
||||
stream_contents += event.chunk_content
|
||||
pass
|
||||
|
||||
assert stream_contents == "c012da01b"
|
||||
@@ -0,0 +1,420 @@
|
||||
import time
|
||||
import uuid
|
||||
from unittest.mock import patch
|
||||
|
||||
from core.app.entities.app_invoke_entities import InvokeFrom
|
||||
from core.workflow.entities.node_entities import NodeRunResult, UserFrom
|
||||
from core.workflow.entities.variable_pool import VariablePool
|
||||
from core.workflow.enums import SystemVariableKey
|
||||
from core.workflow.graph_engine.entities.graph import Graph
|
||||
from core.workflow.graph_engine.entities.graph_init_params import GraphInitParams
|
||||
from core.workflow.graph_engine.entities.graph_runtime_state import GraphRuntimeState
|
||||
from core.workflow.nodes.event import RunCompletedEvent
|
||||
from core.workflow.nodes.iteration.iteration_node import IterationNode
|
||||
from core.workflow.nodes.template_transform.template_transform_node import TemplateTransformNode
|
||||
from models.workflow import WorkflowNodeExecutionStatus, WorkflowType
|
||||
|
||||
|
||||
def test_run():
|
||||
graph_config = {
|
||||
"edges": [
|
||||
{
|
||||
"id": "start-source-pe-target",
|
||||
"source": "start",
|
||||
"target": "pe",
|
||||
},
|
||||
{
|
||||
"id": "iteration-1-source-answer-3-target",
|
||||
"source": "iteration-1",
|
||||
"target": "answer-3",
|
||||
},
|
||||
{
|
||||
"id": "tt-source-if-else-target",
|
||||
"source": "tt",
|
||||
"target": "if-else",
|
||||
},
|
||||
{
|
||||
"id": "if-else-true-answer-2-target",
|
||||
"source": "if-else",
|
||||
"sourceHandle": "true",
|
||||
"target": "answer-2",
|
||||
},
|
||||
{
|
||||
"id": "if-else-false-answer-4-target",
|
||||
"source": "if-else",
|
||||
"sourceHandle": "false",
|
||||
"target": "answer-4",
|
||||
},
|
||||
{
|
||||
"id": "pe-source-iteration-1-target",
|
||||
"source": "pe",
|
||||
"target": "iteration-1",
|
||||
},
|
||||
],
|
||||
"nodes": [
|
||||
{"data": {"title": "Start", "type": "start", "variables": []}, "id": "start"},
|
||||
{
|
||||
"data": {
|
||||
"iterator_selector": ["pe", "list_output"],
|
||||
"output_selector": ["tt", "output"],
|
||||
"output_type": "array[string]",
|
||||
"startNodeType": "template-transform",
|
||||
"start_node_id": "tt",
|
||||
"title": "iteration",
|
||||
"type": "iteration",
|
||||
},
|
||||
"id": "iteration-1",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"answer": "{{#tt.output#}}",
|
||||
"iteration_id": "iteration-1",
|
||||
"title": "answer 2",
|
||||
"type": "answer",
|
||||
},
|
||||
"id": "answer-2",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"iteration_id": "iteration-1",
|
||||
"template": "{{ arg1 }} 123",
|
||||
"title": "template transform",
|
||||
"type": "template-transform",
|
||||
"variables": [{"value_selector": ["sys", "query"], "variable": "arg1"}],
|
||||
},
|
||||
"id": "tt",
|
||||
},
|
||||
{
|
||||
"data": {"answer": "{{#iteration-1.output#}}88888", "title": "answer 3", "type": "answer"},
|
||||
"id": "answer-3",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"conditions": [
|
||||
{
|
||||
"comparison_operator": "is",
|
||||
"id": "1721916275284",
|
||||
"value": "hi",
|
||||
"variable_selector": ["sys", "query"],
|
||||
}
|
||||
],
|
||||
"iteration_id": "iteration-1",
|
||||
"logical_operator": "and",
|
||||
"title": "if",
|
||||
"type": "if-else",
|
||||
},
|
||||
"id": "if-else",
|
||||
},
|
||||
{
|
||||
"data": {"answer": "no hi", "iteration_id": "iteration-1", "title": "answer 4", "type": "answer"},
|
||||
"id": "answer-4",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"instruction": "test1",
|
||||
"model": {
|
||||
"completion_params": {"temperature": 0.7},
|
||||
"mode": "chat",
|
||||
"name": "gpt-4o",
|
||||
"provider": "openai",
|
||||
},
|
||||
"parameters": [
|
||||
{"description": "test", "name": "list_output", "required": False, "type": "array[string]"}
|
||||
],
|
||||
"query": ["sys", "query"],
|
||||
"reasoning_mode": "prompt",
|
||||
"title": "pe",
|
||||
"type": "parameter-extractor",
|
||||
},
|
||||
"id": "pe",
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
graph = Graph.init(graph_config=graph_config)
|
||||
|
||||
init_params = GraphInitParams(
|
||||
tenant_id="1",
|
||||
app_id="1",
|
||||
workflow_type=WorkflowType.CHAT,
|
||||
workflow_id="1",
|
||||
graph_config=graph_config,
|
||||
user_id="1",
|
||||
user_from=UserFrom.ACCOUNT,
|
||||
invoke_from=InvokeFrom.DEBUGGER,
|
||||
call_depth=0,
|
||||
)
|
||||
|
||||
# construct variable pool
|
||||
pool = VariablePool(
|
||||
system_variables={
|
||||
SystemVariableKey.QUERY: "dify",
|
||||
SystemVariableKey.FILES: [],
|
||||
SystemVariableKey.CONVERSATION_ID: "abababa",
|
||||
SystemVariableKey.USER_ID: "1",
|
||||
},
|
||||
user_inputs={},
|
||||
environment_variables=[],
|
||||
)
|
||||
pool.add(["pe", "list_output"], ["dify-1", "dify-2"])
|
||||
|
||||
iteration_node = IterationNode(
|
||||
id=str(uuid.uuid4()),
|
||||
graph_init_params=init_params,
|
||||
graph=graph,
|
||||
graph_runtime_state=GraphRuntimeState(variable_pool=pool, start_at=time.perf_counter()),
|
||||
config={
|
||||
"data": {
|
||||
"iterator_selector": ["pe", "list_output"],
|
||||
"output_selector": ["tt", "output"],
|
||||
"output_type": "array[string]",
|
||||
"startNodeType": "template-transform",
|
||||
"start_node_id": "tt",
|
||||
"title": "迭代",
|
||||
"type": "iteration",
|
||||
},
|
||||
"id": "iteration-1",
|
||||
},
|
||||
)
|
||||
|
||||
def tt_generator(self):
|
||||
return NodeRunResult(
|
||||
status=WorkflowNodeExecutionStatus.SUCCEEDED,
|
||||
inputs={"iterator_selector": "dify"},
|
||||
outputs={"output": "dify 123"},
|
||||
)
|
||||
|
||||
# print("")
|
||||
|
||||
with patch.object(TemplateTransformNode, "_run", new=tt_generator):
|
||||
# execute node
|
||||
result = iteration_node._run()
|
||||
|
||||
count = 0
|
||||
for item in result:
|
||||
# print(type(item), item)
|
||||
count += 1
|
||||
if isinstance(item, RunCompletedEvent):
|
||||
assert item.run_result.status == WorkflowNodeExecutionStatus.SUCCEEDED
|
||||
assert item.run_result.outputs == {"output": ["dify 123", "dify 123"]}
|
||||
|
||||
assert count == 20
|
||||
|
||||
|
||||
def test_run_parallel():
|
||||
graph_config = {
|
||||
"edges": [
|
||||
{
|
||||
"id": "start-source-pe-target",
|
||||
"source": "start",
|
||||
"target": "pe",
|
||||
},
|
||||
{
|
||||
"id": "iteration-1-source-answer-3-target",
|
||||
"source": "iteration-1",
|
||||
"target": "answer-3",
|
||||
},
|
||||
{
|
||||
"id": "iteration-start-source-tt-target",
|
||||
"source": "iteration-start",
|
||||
"target": "tt",
|
||||
},
|
||||
{
|
||||
"id": "iteration-start-source-tt-2-target",
|
||||
"source": "iteration-start",
|
||||
"target": "tt-2",
|
||||
},
|
||||
{
|
||||
"id": "tt-source-if-else-target",
|
||||
"source": "tt",
|
||||
"target": "if-else",
|
||||
},
|
||||
{
|
||||
"id": "tt-2-source-if-else-target",
|
||||
"source": "tt-2",
|
||||
"target": "if-else",
|
||||
},
|
||||
{
|
||||
"id": "if-else-true-answer-2-target",
|
||||
"source": "if-else",
|
||||
"sourceHandle": "true",
|
||||
"target": "answer-2",
|
||||
},
|
||||
{
|
||||
"id": "if-else-false-answer-4-target",
|
||||
"source": "if-else",
|
||||
"sourceHandle": "false",
|
||||
"target": "answer-4",
|
||||
},
|
||||
{
|
||||
"id": "pe-source-iteration-1-target",
|
||||
"source": "pe",
|
||||
"target": "iteration-1",
|
||||
},
|
||||
],
|
||||
"nodes": [
|
||||
{"data": {"title": "Start", "type": "start", "variables": []}, "id": "start"},
|
||||
{
|
||||
"data": {
|
||||
"iterator_selector": ["pe", "list_output"],
|
||||
"output_selector": ["tt", "output"],
|
||||
"output_type": "array[string]",
|
||||
"startNodeType": "template-transform",
|
||||
"start_node_id": "iteration-start",
|
||||
"title": "iteration",
|
||||
"type": "iteration",
|
||||
},
|
||||
"id": "iteration-1",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"answer": "{{#tt.output#}}",
|
||||
"iteration_id": "iteration-1",
|
||||
"title": "answer 2",
|
||||
"type": "answer",
|
||||
},
|
||||
"id": "answer-2",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"iteration_id": "iteration-1",
|
||||
"title": "iteration-start",
|
||||
"type": "iteration-start",
|
||||
},
|
||||
"id": "iteration-start",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"iteration_id": "iteration-1",
|
||||
"template": "{{ arg1 }} 123",
|
||||
"title": "template transform",
|
||||
"type": "template-transform",
|
||||
"variables": [{"value_selector": ["sys", "query"], "variable": "arg1"}],
|
||||
},
|
||||
"id": "tt",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"iteration_id": "iteration-1",
|
||||
"template": "{{ arg1 }} 321",
|
||||
"title": "template transform",
|
||||
"type": "template-transform",
|
||||
"variables": [{"value_selector": ["sys", "query"], "variable": "arg1"}],
|
||||
},
|
||||
"id": "tt-2",
|
||||
},
|
||||
{
|
||||
"data": {"answer": "{{#iteration-1.output#}}88888", "title": "answer 3", "type": "answer"},
|
||||
"id": "answer-3",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"conditions": [
|
||||
{
|
||||
"comparison_operator": "is",
|
||||
"id": "1721916275284",
|
||||
"value": "hi",
|
||||
"variable_selector": ["sys", "query"],
|
||||
}
|
||||
],
|
||||
"iteration_id": "iteration-1",
|
||||
"logical_operator": "and",
|
||||
"title": "if",
|
||||
"type": "if-else",
|
||||
},
|
||||
"id": "if-else",
|
||||
},
|
||||
{
|
||||
"data": {"answer": "no hi", "iteration_id": "iteration-1", "title": "answer 4", "type": "answer"},
|
||||
"id": "answer-4",
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"instruction": "test1",
|
||||
"model": {
|
||||
"completion_params": {"temperature": 0.7},
|
||||
"mode": "chat",
|
||||
"name": "gpt-4o",
|
||||
"provider": "openai",
|
||||
},
|
||||
"parameters": [
|
||||
{"description": "test", "name": "list_output", "required": False, "type": "array[string]"}
|
||||
],
|
||||
"query": ["sys", "query"],
|
||||
"reasoning_mode": "prompt",
|
||||
"title": "pe",
|
||||
"type": "parameter-extractor",
|
||||
},
|
||||
"id": "pe",
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
graph = Graph.init(graph_config=graph_config)
|
||||
|
||||
init_params = GraphInitParams(
|
||||
tenant_id="1",
|
||||
app_id="1",
|
||||
workflow_type=WorkflowType.CHAT,
|
||||
workflow_id="1",
|
||||
graph_config=graph_config,
|
||||
user_id="1",
|
||||
user_from=UserFrom.ACCOUNT,
|
||||
invoke_from=InvokeFrom.DEBUGGER,
|
||||
call_depth=0,
|
||||
)
|
||||
|
||||
# construct variable pool
|
||||
pool = VariablePool(
|
||||
system_variables={
|
||||
SystemVariableKey.QUERY: "dify",
|
||||
SystemVariableKey.FILES: [],
|
||||
SystemVariableKey.CONVERSATION_ID: "abababa",
|
||||
SystemVariableKey.USER_ID: "1",
|
||||
},
|
||||
user_inputs={},
|
||||
environment_variables=[],
|
||||
)
|
||||
pool.add(["pe", "list_output"], ["dify-1", "dify-2"])
|
||||
|
||||
iteration_node = IterationNode(
|
||||
id=str(uuid.uuid4()),
|
||||
graph_init_params=init_params,
|
||||
graph=graph,
|
||||
graph_runtime_state=GraphRuntimeState(variable_pool=pool, start_at=time.perf_counter()),
|
||||
config={
|
||||
"data": {
|
||||
"iterator_selector": ["pe", "list_output"],
|
||||
"output_selector": ["tt", "output"],
|
||||
"output_type": "array[string]",
|
||||
"startNodeType": "template-transform",
|
||||
"start_node_id": "iteration-start",
|
||||
"title": "迭代",
|
||||
"type": "iteration",
|
||||
},
|
||||
"id": "iteration-1",
|
||||
},
|
||||
)
|
||||
|
||||
def tt_generator(self):
|
||||
return NodeRunResult(
|
||||
status=WorkflowNodeExecutionStatus.SUCCEEDED,
|
||||
inputs={"iterator_selector": "dify"},
|
||||
outputs={"output": "dify 123"},
|
||||
)
|
||||
|
||||
# print("")
|
||||
|
||||
with patch.object(TemplateTransformNode, "_run", new=tt_generator):
|
||||
# execute node
|
||||
result = iteration_node._run()
|
||||
|
||||
count = 0
|
||||
for item in result:
|
||||
# print(type(item), item)
|
||||
count += 1
|
||||
if isinstance(item, RunCompletedEvent):
|
||||
assert item.run_result.status == WorkflowNodeExecutionStatus.SUCCEEDED
|
||||
assert item.run_result.outputs == {"output": ["dify 123", "dify 123"]}
|
||||
|
||||
assert count == 32
|
||||
@@ -1,22 +1,70 @@
|
||||
import time
|
||||
import uuid
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
from core.app.entities.app_invoke_entities import InvokeFrom
|
||||
from core.workflow.entities.node_entities import UserFrom
|
||||
from core.workflow.entities.variable_pool import VariablePool
|
||||
from core.workflow.enums import SystemVariableKey
|
||||
from core.workflow.graph_engine.entities.graph import Graph
|
||||
from core.workflow.graph_engine.entities.graph_init_params import GraphInitParams
|
||||
from core.workflow.graph_engine.entities.graph_runtime_state import GraphRuntimeState
|
||||
from core.workflow.nodes.answer.answer_node import AnswerNode
|
||||
from core.workflow.nodes.base_node import UserFrom
|
||||
from extensions.ext_database import db
|
||||
from models.workflow import WorkflowNodeExecutionStatus
|
||||
from models.workflow import WorkflowNodeExecutionStatus, WorkflowType
|
||||
|
||||
|
||||
def test_execute_answer():
|
||||
node = AnswerNode(
|
||||
graph_config = {
|
||||
"edges": [
|
||||
{
|
||||
"id": "start-source-answer-target",
|
||||
"source": "start",
|
||||
"target": "answer",
|
||||
},
|
||||
],
|
||||
"nodes": [
|
||||
{"data": {"type": "start"}, "id": "start"},
|
||||
{
|
||||
"data": {
|
||||
"title": "123",
|
||||
"type": "answer",
|
||||
"answer": "Today's weather is {{#start.weather#}}\n{{#llm.text#}}\n{{img}}\nFin.",
|
||||
},
|
||||
"id": "answer",
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
graph = Graph.init(graph_config=graph_config)
|
||||
|
||||
init_params = GraphInitParams(
|
||||
tenant_id="1",
|
||||
app_id="1",
|
||||
workflow_type=WorkflowType.WORKFLOW,
|
||||
workflow_id="1",
|
||||
graph_config=graph_config,
|
||||
user_id="1",
|
||||
user_from=UserFrom.ACCOUNT,
|
||||
invoke_from=InvokeFrom.DEBUGGER,
|
||||
call_depth=0,
|
||||
)
|
||||
|
||||
# construct variable pool
|
||||
variable_pool = VariablePool(
|
||||
system_variables={SystemVariableKey.FILES: [], SystemVariableKey.USER_ID: "aaa"},
|
||||
user_inputs={},
|
||||
environment_variables=[],
|
||||
conversation_variables=[],
|
||||
)
|
||||
variable_pool.add(["start", "weather"], "sunny")
|
||||
variable_pool.add(["llm", "text"], "You are a helpful AI.")
|
||||
|
||||
node = AnswerNode(
|
||||
id=str(uuid.uuid4()),
|
||||
graph_init_params=init_params,
|
||||
graph=graph,
|
||||
graph_runtime_state=GraphRuntimeState(variable_pool=variable_pool, start_at=time.perf_counter()),
|
||||
config={
|
||||
"id": "answer",
|
||||
"data": {
|
||||
@@ -27,20 +75,11 @@ def test_execute_answer():
|
||||
},
|
||||
)
|
||||
|
||||
# construct variable pool
|
||||
pool = VariablePool(
|
||||
system_variables={SystemVariableKey.FILES: [], SystemVariableKey.USER_ID: "aaa"},
|
||||
user_inputs={},
|
||||
environment_variables=[],
|
||||
)
|
||||
pool.add(["start", "weather"], "sunny")
|
||||
pool.add(["llm", "text"], "You are a helpful AI.")
|
||||
|
||||
# Mock db.session.close()
|
||||
db.session.close = MagicMock()
|
||||
|
||||
# execute node
|
||||
result = node._run(pool)
|
||||
result = node._run()
|
||||
|
||||
assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
|
||||
assert result.outputs["answer"] == "Today's weather is sunny\nYou are a helpful AI.\n{{img}}\nFin."
|
||||
|
||||
@@ -1,22 +1,63 @@
import time
import uuid
from unittest.mock import MagicMock

from core.app.entities.app_invoke_entities import InvokeFrom
from core.workflow.entities.node_entities import UserFrom
from core.workflow.entities.variable_pool import VariablePool
from core.workflow.enums import SystemVariableKey
from core.workflow.nodes.base_node import UserFrom
from core.workflow.graph_engine.entities.graph import Graph
from core.workflow.graph_engine.entities.graph_init_params import GraphInitParams
from core.workflow.graph_engine.entities.graph_runtime_state import GraphRuntimeState
from core.workflow.nodes.if_else.if_else_node import IfElseNode
from extensions.ext_database import db
from models.workflow import WorkflowNodeExecutionStatus
from models.workflow import WorkflowNodeExecutionStatus, WorkflowType


def test_execute_if_else_result_true():
node = IfElseNode(
graph_config = {"edges": [], "nodes": [{"data": {"type": "start"}, "id": "start"}]}

graph = Graph.init(graph_config=graph_config)

init_params = GraphInitParams(
tenant_id="1",
app_id="1",
workflow_type=WorkflowType.WORKFLOW,
workflow_id="1",
graph_config=graph_config,
user_id="1",
user_from=UserFrom.ACCOUNT,
invoke_from=InvokeFrom.DEBUGGER,
call_depth=0,
)

# construct variable pool
pool = VariablePool(
system_variables={SystemVariableKey.FILES: [], SystemVariableKey.USER_ID: "aaa"}, user_inputs={}
)
pool.add(["start", "array_contains"], ["ab", "def"])
pool.add(["start", "array_not_contains"], ["ac", "def"])
pool.add(["start", "contains"], "cabcde")
pool.add(["start", "not_contains"], "zacde")
pool.add(["start", "start_with"], "abc")
pool.add(["start", "end_with"], "zzab")
pool.add(["start", "is"], "ab")
pool.add(["start", "is_not"], "aab")
pool.add(["start", "empty"], "")
pool.add(["start", "not_empty"], "aaa")
pool.add(["start", "equals"], 22)
pool.add(["start", "not_equals"], 23)
pool.add(["start", "greater_than"], 23)
pool.add(["start", "less_than"], 21)
pool.add(["start", "greater_than_or_equal"], 22)
pool.add(["start", "less_than_or_equal"], 21)
pool.add(["start", "not_null"], "1212")

node = IfElseNode(
id=str(uuid.uuid4()),
graph_init_params=init_params,
graph=graph,
graph_runtime_state=GraphRuntimeState(variable_pool=pool, start_at=time.perf_counter()),
config={
"id": "if-else",
"data": {
@@ -63,48 +104,64 @@ def test_execute_if_else_result_true():
},
)

# construct variable pool
pool = VariablePool(
system_variables={SystemVariableKey.FILES: [], SystemVariableKey.USER_ID: "aaa"},
user_inputs={},
environment_variables=[],
)
pool.add(["start", "array_contains"], ["ab", "def"])
pool.add(["start", "array_not_contains"], ["ac", "def"])
pool.add(["start", "contains"], "cabcde")
pool.add(["start", "not_contains"], "zacde")
pool.add(["start", "start_with"], "abc")
pool.add(["start", "end_with"], "zzab")
pool.add(["start", "is"], "ab")
pool.add(["start", "is_not"], "aab")
pool.add(["start", "empty"], "")
pool.add(["start", "not_empty"], "aaa")
pool.add(["start", "equals"], 22)
pool.add(["start", "not_equals"], 23)
pool.add(["start", "greater_than"], 23)
pool.add(["start", "less_than"], 21)
pool.add(["start", "greater_than_or_equal"], 22)
pool.add(["start", "less_than_or_equal"], 21)
pool.add(["start", "not_null"], "1212")

# Mock db.session.close()
db.session.close = MagicMock()

# execute node
result = node._run(pool)
result = node._run()

assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
assert result.outputs["result"] is True

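The invocation change carried through all of these tests is visible in the two result lines above: the variable pool is now injected up front via GraphRuntimeState, so _run() no longer takes the pool as an argument. A self-contained toy of that design choice (not Dify code; ToyNode and ToyRuntimeState are illustrative):

from dataclasses import dataclass, field

# Toy illustration: runtime state is injected at construction time,
# so _run() takes no per-call pool argument.
@dataclass
class ToyRuntimeState:
    variables: dict = field(default_factory=dict)

class ToyNode:
    def __init__(self, runtime_state: ToyRuntimeState):
        self.runtime_state = runtime_state

    def _run(self) -> bool:
        # Reads from the injected state rather than from an argument.
        return self.runtime_state.variables.get(("start", "not_null")) is not None

state = ToyRuntimeState(variables={("start", "not_null"): "1212"})
assert ToyNode(state)._run() is True
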
def test_execute_if_else_result_false():
node = IfElseNode(
graph_config = {
"edges": [
{
"id": "start-source-llm-target",
"source": "start",
"target": "llm",
},
],
"nodes": [
{"data": {"type": "start"}, "id": "start"},
{
"data": {
"type": "llm",
},
"id": "llm",
},
],
}

graph = Graph.init(graph_config=graph_config)

init_params = GraphInitParams(
tenant_id="1",
app_id="1",
workflow_type=WorkflowType.WORKFLOW,
workflow_id="1",
graph_config=graph_config,
user_id="1",
user_from=UserFrom.ACCOUNT,
invoke_from=InvokeFrom.DEBUGGER,
call_depth=0,
)

# construct variable pool
pool = VariablePool(
system_variables={SystemVariableKey.FILES: [], SystemVariableKey.USER_ID: "aaa"},
user_inputs={},
environment_variables=[],
)
pool.add(["start", "array_contains"], ["1ab", "def"])
pool.add(["start", "array_not_contains"], ["ab", "def"])

node = IfElseNode(
id=str(uuid.uuid4()),
graph_init_params=init_params,
graph=graph,
graph_runtime_state=GraphRuntimeState(variable_pool=pool, start_at=time.perf_counter()),
config={
"id": "if-else",
"data": {
@@ -127,20 +184,11 @@ def test_execute_if_else_result_false():
},
)

# construct variable pool
pool = VariablePool(
system_variables={SystemVariableKey.FILES: [], SystemVariableKey.USER_ID: "aaa"},
user_inputs={},
environment_variables=[],
)
pool.add(["start", "array_contains"], ["1ab", "def"])
pool.add(["start", "array_not_contains"], ["ab", "def"])

# Mock db.session.close()
db.session.close = MagicMock()

# execute node
result = node._run(pool)
result = node._run()

assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
assert result.outputs["result"] is False

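Reading the two if/else tests together: the True case seeds ["ab", "def"] for array_contains while the False case seeds ["1ab", "def"], which suggests the array check is exact element membership rather than substring matching. The condition targets sit in the elided hunks, so the "ab" below is an assumption, and the helper is hypothetical rather than IfElseNode's implementation:

# Hypothetical check; the target "ab" is assumed from the seeded data, not the elided config.
def array_contains(values: list[str], target: str) -> bool:
    # Exact element membership, not substring matching.
    return target in values

assert array_contains(["ab", "def"], "ab") is True      # data from the True test
assert array_contains(["1ab", "def"], "ab") is False    # data from the False test
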
@@ -1,17 +1,56 @@
import time
import uuid
from unittest import mock
from uuid import uuid4

from core.app.entities.app_invoke_entities import InvokeFrom
from core.app.segments import ArrayStringVariable, StringVariable
from core.workflow.entities.node_entities import UserFrom
from core.workflow.entities.variable_pool import VariablePool
from core.workflow.enums import SystemVariableKey
from core.workflow.nodes.base_node import UserFrom
from core.workflow.graph_engine.entities.graph import Graph
from core.workflow.graph_engine.entities.graph_init_params import GraphInitParams
from core.workflow.graph_engine.entities.graph_runtime_state import GraphRuntimeState
from core.workflow.nodes.variable_assigner import VariableAssignerNode, WriteMode
from models.workflow import WorkflowType

DEFAULT_NODE_ID = "node_id"


def test_overwrite_string_variable():
graph_config = {
"edges": [
{
"id": "start-source-assigner-target",
"source": "start",
"target": "assigner",
},
],
"nodes": [
{"data": {"type": "start"}, "id": "start"},
{
"data": {
"type": "assigner",
},
"id": "assigner",
},
],
}

graph = Graph.init(graph_config=graph_config)

init_params = GraphInitParams(
tenant_id="1",
app_id="1",
workflow_type=WorkflowType.WORKFLOW,
workflow_id="1",
graph_config=graph_config,
user_id="1",
user_from=UserFrom.ACCOUNT,
invoke_from=InvokeFrom.DEBUGGER,
call_depth=0,
)

conversation_variable = StringVariable(
id=str(uuid4()),
name="test_conversation_variable",
@@ -24,13 +63,24 @@ def test_overwrite_string_variable():
value="the second value",
)

# construct variable pool
variable_pool = VariablePool(
system_variables={SystemVariableKey.CONVERSATION_ID: "conversation_id"},
user_inputs={},
environment_variables=[],
conversation_variables=[conversation_variable],
)

variable_pool.add(
[DEFAULT_NODE_ID, input_variable.name],
input_variable,
)

node = VariableAssignerNode(
tenant_id="tenant_id",
app_id="app_id",
workflow_id="workflow_id",
user_id="user_id",
user_from=UserFrom.ACCOUNT,
invoke_from=InvokeFrom.DEBUGGER,
id=str(uuid.uuid4()),
graph_init_params=init_params,
graph=graph,
graph_runtime_state=GraphRuntimeState(variable_pool=variable_pool, start_at=time.perf_counter()),
config={
"id": "node_id",
"data": {
@@ -41,19 +91,8 @@ def test_overwrite_string_variable():
},
)

variable_pool = VariablePool(
system_variables={SystemVariableKey.CONVERSATION_ID: "conversation_id"},
user_inputs={},
environment_variables=[],
conversation_variables=[conversation_variable],
)
variable_pool.add(
[DEFAULT_NODE_ID, input_variable.name],
input_variable,
)

with mock.patch("core.workflow.nodes.variable_assigner.node.update_conversation_variable") as mock_run:
node.run(variable_pool)
list(node.run())
mock_run.assert_called_once()

got = variable_pool.get(["conversation", conversation_variable.name])
@@ -63,6 +102,39 @@ def test_overwrite_string_variable():

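A second signature shift appears in the hunk above: node.run(variable_pool) becomes list(node.run()). run() now yields events, so the test drains the generator to force execution before asserting on its side effects. A toy sketch (not Dify code):

# Toy generator-based run(); draining it is what actually executes the body.
def toy_run():
    yield "node_started"
    # the conversation-variable update would happen here
    yield "node_finished"

events = list(toy_run())   # forces the generator to completion
assert events[-1] == "node_finished"
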
def test_append_variable_to_array():
graph_config = {
"edges": [
{
"id": "start-source-assigner-target",
"source": "start",
"target": "assigner",
},
],
"nodes": [
{"data": {"type": "start"}, "id": "start"},
{
"data": {
"type": "assigner",
},
"id": "assigner",
},
],
}

graph = Graph.init(graph_config=graph_config)

init_params = GraphInitParams(
tenant_id="1",
app_id="1",
workflow_type=WorkflowType.WORKFLOW,
workflow_id="1",
graph_config=graph_config,
user_id="1",
user_from=UserFrom.ACCOUNT,
invoke_from=InvokeFrom.DEBUGGER,
call_depth=0,
)

conversation_variable = ArrayStringVariable(
id=str(uuid4()),
name="test_conversation_variable",
@@ -75,23 +147,6 @@ def test_append_variable_to_array():
value="the second value",
)

node = VariableAssignerNode(
tenant_id="tenant_id",
app_id="app_id",
workflow_id="workflow_id",
user_id="user_id",
user_from=UserFrom.ACCOUNT,
invoke_from=InvokeFrom.DEBUGGER,
config={
"id": "node_id",
"data": {
"assigned_variable_selector": ["conversation", conversation_variable.name],
"write_mode": WriteMode.APPEND.value,
"input_variable_selector": [DEFAULT_NODE_ID, input_variable.name],
},
},
)

variable_pool = VariablePool(
system_variables={SystemVariableKey.CONVERSATION_ID: "conversation_id"},
user_inputs={},
@@ -103,8 +158,23 @@ def test_append_variable_to_array():
input_variable,
)

node = VariableAssignerNode(
id=str(uuid.uuid4()),
graph_init_params=init_params,
graph=graph,
graph_runtime_state=GraphRuntimeState(variable_pool=variable_pool, start_at=time.perf_counter()),
config={
"id": "node_id",
"data": {
"assigned_variable_selector": ["conversation", conversation_variable.name],
"write_mode": WriteMode.APPEND.value,
"input_variable_selector": [DEFAULT_NODE_ID, input_variable.name],
},
},
)

with mock.patch("core.workflow.nodes.variable_assigner.node.update_conversation_variable") as mock_run:
node.run(variable_pool)
list(node.run())
mock_run.assert_called_once()

got = variable_pool.get(["conversation", conversation_variable.name])
@@ -113,19 +183,57 @@ def test_append_variable_to_array():

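The selectors passed to variable_pool.add and variable_pool.get are plain lists: a node id (or the "conversation" namespace used for conversation variables) followed by the variable name. A rough stand-in using a dict keyed by tuples; the names below are illustrative, and VariablePool itself stores richer Variable objects rather than bare values:

# Plain-dict stand-in for selector-style lookup; not VariablePool itself.
store: dict[tuple[str, ...], object] = {}

def add(selector: list[str], value: object) -> None:
    store[tuple(selector)] = value

def get(selector: list[str]):
    return store.get(tuple(selector))

add(["node_id", "some_input"], "the second value")                        # node-scoped value
add(["conversation", "test_conversation_variable"], ["the first value"])  # conversation-scoped value
assert get(["conversation", "test_conversation_variable"]) == ["the first value"]
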
def test_clear_array():
graph_config = {
"edges": [
{
"id": "start-source-assigner-target",
"source": "start",
"target": "assigner",
},
],
"nodes": [
{"data": {"type": "start"}, "id": "start"},
{
"data": {
"type": "assigner",
},
"id": "assigner",
},
],
}

graph = Graph.init(graph_config=graph_config)

init_params = GraphInitParams(
tenant_id="1",
app_id="1",
workflow_type=WorkflowType.WORKFLOW,
workflow_id="1",
graph_config=graph_config,
user_id="1",
user_from=UserFrom.ACCOUNT,
invoke_from=InvokeFrom.DEBUGGER,
call_depth=0,
)

conversation_variable = ArrayStringVariable(
id=str(uuid4()),
name="test_conversation_variable",
value=["the first value"],
)

variable_pool = VariablePool(
system_variables={SystemVariableKey.CONVERSATION_ID: "conversation_id"},
user_inputs={},
environment_variables=[],
conversation_variables=[conversation_variable],
)

node = VariableAssignerNode(
tenant_id="tenant_id",
app_id="app_id",
workflow_id="workflow_id",
user_id="user_id",
user_from=UserFrom.ACCOUNT,
invoke_from=InvokeFrom.DEBUGGER,
id=str(uuid.uuid4()),
graph_init_params=init_params,
graph=graph,
graph_runtime_state=GraphRuntimeState(variable_pool=variable_pool, start_at=time.perf_counter()),
config={
"id": "node_id",
"data": {
@@ -136,14 +244,9 @@ def test_clear_array():
},
)

variable_pool = VariablePool(
system_variables={SystemVariableKey.CONVERSATION_ID: "conversation_id"},
user_inputs={},
environment_variables=[],
conversation_variables=[conversation_variable],
)

node.run(variable_pool)
with mock.patch("core.workflow.nodes.variable_assigner.node.update_conversation_variable") as mock_run:
list(node.run())
mock_run.assert_called_once()

got = variable_pool.get(["conversation", conversation_variable.name])
assert got is not None

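Taken together, the three variable-assigner tests cover one write mode each: overwrite a string conversation variable, append to an array, and clear an array. A hedged sketch of those semantics; ToyWriteMode and its string values are assumptions, since the diff only names WriteMode.APPEND explicitly, and the real node persists the result through update_conversation_variable:

from enum import Enum

# Illustrative enum: member names other than APPEND, and all string values, are assumed.
class ToyWriteMode(Enum):
    OVER_WRITE = "over-write"
    APPEND = "append"
    CLEAR = "clear"

def apply(mode: ToyWriteMode, current, incoming):
    if mode is ToyWriteMode.OVER_WRITE:
        return incoming                  # replace the stored value outright
    if mode is ToyWriteMode.APPEND:
        return [*current, incoming]      # extend the stored array
    return []                            # CLEAR: drop everything

assert apply(ToyWriteMode.OVER_WRITE, "the first value", "the second value") == "the second value"
assert apply(ToyWriteMode.APPEND, ["the first value"], "the second value") == ["the first value", "the second value"]
assert apply(ToyWriteMode.CLEAR, ["the first value"], None) == []
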