feat: Parallel Execution of Nodes in Workflows (#8192)

Co-authored-by: StyleZhang <jasonapring2015@outlook.com>
Co-authored-by: Yi <yxiaoisme@gmail.com>
Co-authored-by: -LAN- <laipz8200@outlook.com>
Author: takatost
Date: 2024-09-10 15:23:16 +08:00
Committed by: GitHub
Commit: dabfd74622 (parent: 5da0182800)
156 changed files with 11158 additions and 5605 deletions

@@ -1,5 +1,8 @@
 import json
 import os
+import time
+import uuid
+from collections.abc import Generator
 from unittest.mock import MagicMock
 
 import pytest
@@ -10,28 +13,77 @@ from core.entities.provider_entities import CustomConfiguration, CustomProviderC
 from core.model_manager import ModelInstance
 from core.model_runtime.entities.model_entities import ModelType
 from core.model_runtime.model_providers import ModelProviderFactory
+from core.workflow.entities.node_entities import UserFrom
 from core.workflow.entities.variable_pool import VariablePool
 from core.workflow.enums import SystemVariableKey
-from core.workflow.nodes.base_node import UserFrom
+from core.workflow.graph_engine.entities.graph import Graph
+from core.workflow.graph_engine.entities.graph_init_params import GraphInitParams
+from core.workflow.graph_engine.entities.graph_runtime_state import GraphRuntimeState
+from core.workflow.nodes.event import RunCompletedEvent
 from core.workflow.nodes.llm.llm_node import LLMNode
 from extensions.ext_database import db
 from models.provider import ProviderType
-from models.workflow import WorkflowNodeExecutionStatus
+from models.workflow import WorkflowNodeExecutionStatus, WorkflowType
 
 """FOR MOCK FIXTURES, DO NOT REMOVE"""
 from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock
 from tests.integration_tests.workflow.nodes.__mock.code_executor import setup_code_executor_mock
 
 
-@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True)
-def test_execute_llm(setup_openai_mock):
-    node = LLMNode(
+def init_llm_node(config: dict) -> LLMNode:
+    graph_config = {
+        "edges": [
+            {
+                "id": "start-source-next-target",
+                "source": "start",
+                "target": "llm",
+            },
+        ],
+        "nodes": [{"data": {"type": "start"}, "id": "start"}, config],
+    }
+
+    graph = Graph.init(graph_config=graph_config)
+
+    init_params = GraphInitParams(
         tenant_id="1",
         app_id="1",
+        workflow_type=WorkflowType.WORKFLOW,
         workflow_id="1",
+        graph_config=graph_config,
         user_id="1",
-        invoke_from=InvokeFrom.WEB_APP,
         user_from=UserFrom.ACCOUNT,
+        invoke_from=InvokeFrom.DEBUGGER,
+        call_depth=0,
+    )
+
+    # construct variable pool
+    variable_pool = VariablePool(
+        system_variables={
+            SystemVariableKey.QUERY: "what's the weather today?",
+            SystemVariableKey.FILES: [],
+            SystemVariableKey.CONVERSATION_ID: "abababa",
+            SystemVariableKey.USER_ID: "aaa",
+        },
+        user_inputs={},
+        environment_variables=[],
+        conversation_variables=[],
+    )
+    variable_pool.add(["abc", "output"], "sunny")
+
+    node = LLMNode(
+        id=str(uuid.uuid4()),
+        graph_init_params=init_params,
+        graph=graph,
+        graph_runtime_state=GraphRuntimeState(variable_pool=variable_pool, start_at=time.perf_counter()),
+        config=config,
+    )
+
+    return node
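The init_llm_node helper added above is the heart of this test refactor: instead of constructing LLMNode from loose keyword arguments, a test now builds a two-node graph (start -> llm), wraps the shared IDs in GraphInitParams, and hands the VariablePool to a GraphRuntimeState. A minimal sketch of calling it from a test (the "data" payload is abbreviated here; the full shapes appear in the test bodies below):

    node = init_llm_node(
        config={
            "id": "llm",
            "data": {
                "type": "llm",
                # model / prompt_template / context / vision settings go here,
                # exactly as in the test configs below (abbreviated in this sketch)
            },
        },
    )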
+
+
+@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True)
+def test_execute_llm(setup_openai_mock):
+    node = init_llm_node(
         config={
             "id": "llm",
             "data": {
@@ -49,19 +101,6 @@ def test_execute_llm(setup_openai_mock):
         },
     )
 
-    # construct variable pool
-    pool = VariablePool(
-        system_variables={
-            SystemVariableKey.QUERY: "what's the weather today?",
-            SystemVariableKey.FILES: [],
-            SystemVariableKey.CONVERSATION_ID: "abababa",
-            SystemVariableKey.USER_ID: "aaa",
-        },
-        user_inputs={},
-        environment_variables=[],
-    )
-    pool.add(["abc", "output"], "sunny")
-
     credentials = {"openai_api_key": os.environ.get("OPENAI_API_KEY")}
 
     provider_instance = ModelProviderFactory().get_provider_instance("openai")
@@ -80,13 +119,15 @@ def test_execute_llm(setup_openai_mock):
         model_type_instance=model_type_instance,
     )
 
     model_instance = ModelInstance(provider_model_bundle=provider_model_bundle, model="gpt-3.5-turbo")
+    model_schema = model_type_instance.get_model_schema("gpt-3.5-turbo")
+    assert model_schema is not None
     model_config = ModelConfigWithCredentialsEntity(
         model="gpt-3.5-turbo",
         provider="openai",
         mode="chat",
         credentials=credentials,
         parameters={},
-        model_schema=model_type_instance.get_model_schema("gpt-3.5-turbo"),
+        model_schema=model_schema,
         provider_model_bundle=provider_model_bundle,
     )
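Hoisting get_model_schema into a local with an assert is a small but deliberate change: a None schema now fails the test immediately with a clear message instead of flowing into ModelConfigWithCredentialsEntity, and the assert also narrows the Optional type for static checkers. A generic sketch of the pattern (function and names here are illustrative, not from this diff):

    from typing import Optional

    def load_schema(model: str) -> Optional[dict]:
        # stand-in for model_type_instance.get_model_schema(...)
        return {"name": model} if model else None

    schema = load_schema("gpt-3.5-turbo")
    assert schema is not None  # fails fast, and narrows Optional[dict] to dict
    print(schema["name"])      # safe to use without a None check after the assert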
@@ -96,11 +137,16 @@ def test_execute_llm(setup_openai_mock):
     node._fetch_model_config = MagicMock(return_value=(model_instance, model_config))
 
     # execute node
-    result = node.run(pool)
+    result = node._run()
+    assert isinstance(result, Generator)
 
-    assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
-    assert result.outputs["text"] is not None
-    assert result.outputs["usage"]["total_tokens"] > 0
+    for item in result:
+        if isinstance(item, RunCompletedEvent):
+            assert item.run_result.status == WorkflowNodeExecutionStatus.SUCCEEDED
+            assert item.run_result.process_data is not None
+            assert item.run_result.outputs is not None
+            assert item.run_result.outputs.get("text") is not None
+            assert item.run_result.outputs.get("usage", {})["total_tokens"] > 0
 
 
 @pytest.mark.parametrize("setup_code_executor_mock", [["none"]], indirect=True)
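Note the shape change this refactor forces on every test: _run() now returns a generator of events rather than a NodeRunResult, so assertions move inside a loop that watches for RunCompletedEvent. If more tests adopt this pattern, a small helper could centralize the draining (a sketch built only on the event API used above; not part of this commit):

    def get_run_result(node: LLMNode):
        # Drain the node's event stream and return the terminal run result.
        for item in node._run():
            if isinstance(item, RunCompletedEvent):
                return item.run_result
        raise AssertionError("node completed without emitting RunCompletedEvent")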
@@ -109,13 +155,7 @@ def test_execute_llm_with_jinja2(setup_code_executor_mock, setup_openai_mock):
     """
     Test execute LLM node with jinja2
     """
-    node = LLMNode(
-        tenant_id="1",
-        app_id="1",
-        workflow_id="1",
-        user_id="1",
-        invoke_from=InvokeFrom.WEB_APP,
-        user_from=UserFrom.ACCOUNT,
+    node = init_llm_node(
         config={
             "id": "llm",
             "data": {
@@ -149,19 +189,6 @@ def test_execute_llm_with_jinja2(setup_code_executor_mock, setup_openai_mock):
         },
     )
 
-    # construct variable pool
-    pool = VariablePool(
-        system_variables={
-            SystemVariableKey.QUERY: "what's the weather today?",
-            SystemVariableKey.FILES: [],
-            SystemVariableKey.CONVERSATION_ID: "abababa",
-            SystemVariableKey.USER_ID: "aaa",
-        },
-        user_inputs={},
-        environment_variables=[],
-    )
-    pool.add(["abc", "output"], "sunny")
-
     credentials = {"openai_api_key": os.environ.get("OPENAI_API_KEY")}
 
     provider_instance = ModelProviderFactory().get_provider_instance("openai")
@@ -181,14 +208,15 @@ def test_execute_llm_with_jinja2(setup_code_executor_mock, setup_openai_mock):
     )
 
     model_instance = ModelInstance(provider_model_bundle=provider_model_bundle, model="gpt-3.5-turbo")
+    model_schema = model_type_instance.get_model_schema("gpt-3.5-turbo")
+    assert model_schema is not None
     model_config = ModelConfigWithCredentialsEntity(
         model="gpt-3.5-turbo",
         provider="openai",
         mode="chat",
         credentials=credentials,
         parameters={},
-        model_schema=model_type_instance.get_model_schema("gpt-3.5-turbo"),
+        model_schema=model_schema,
         provider_model_bundle=provider_model_bundle,
     )
@@ -198,8 +226,11 @@ def test_execute_llm_with_jinja2(setup_code_executor_mock, setup_openai_mock):
     node._fetch_model_config = MagicMock(return_value=(model_instance, model_config))
 
     # execute node
-    result = node.run(pool)
+    result = node._run()
 
-    assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
-    assert "sunny" in json.dumps(result.process_data)
-    assert "what's the weather today?" in json.dumps(result.process_data)
+    for item in result:
+        if isinstance(item, RunCompletedEvent):
+            assert item.run_result.status == WorkflowNodeExecutionStatus.SUCCEEDED
+            assert item.run_result.process_data is not None
+            assert "sunny" in json.dumps(item.run_result.process_data)
+            assert "what's the weather today?" in json.dumps(item.run_result.process_data)
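The graph_config format that init_llm_node assembles is also what the parallel execution in this PR's title builds on: fan-out is expressed as several edges sharing a source node. A hypothetical two-branch config in the same shape (node ids and data are illustrative; this test file only exercises the single start -> llm edge):

    parallel_graph_config = {
        "edges": [
            # two edges leaving "start" fan out into branches that can run in parallel
            {"id": "start-source-llm1-target", "source": "start", "target": "llm1"},
            {"id": "start-source-llm2-target", "source": "start", "target": "llm2"},
        ],
        "nodes": [
            {"data": {"type": "start"}, "id": "start"},
            {"data": {"type": "llm"}, "id": "llm1"},  # node configs abbreviated
            {"data": {"type": "llm"}, "id": "llm2"},
        ],
    }
    graph = Graph.init(graph_config=parallel_graph_config)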