chore(api/tests): apply ruff reformat #7590 (#7591)

Co-authored-by: -LAN- <laipz8200@outlook.com>
Author: Bowen Liang
Date: 2024-08-23 23:52:25 +08:00
Committed by: GitHub
Parent: 2da63654e5
Commit: b035c02f78
155 changed files with 4279 additions and 5925 deletions
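
The diff below is a mechanical reformat by Ruff's formatter (presumably a ruff format run over the api package; the exact invocation and configuration are not recorded here). Like Black, it normalizes string literals to double quotes, adds trailing commas when splitting calls across lines, and collapses short nested structures onto a single line. The change is behavior-neutral: a single-quoted and a double-quoted literal denote the same Python string, as this minimal sketch illustrates.

    # Quote style does not affect string values, so the reformatted tests
    # compare and assert exactly the same data as before.
    assert 'gpt-3.5-turbo' == "gpt-3.5-turbo"
    # Escaped quotes change form but not value either:
    assert 'what\'s the weather today?' == "what's the weather today?"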

@@ -23,90 +23,71 @@ from tests.integration_tests.model_runtime.__mock.openai import setup_openai_moc
 from tests.integration_tests.workflow.nodes.__mock.code_executor import setup_code_executor_mock
-@pytest.mark.parametrize('setup_openai_mock', [['chat']], indirect=True)
+@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True)
 def test_execute_llm(setup_openai_mock):
     node = LLMNode(
-        tenant_id='1',
-        app_id='1',
-        workflow_id='1',
-        user_id='1',
+        tenant_id="1",
+        app_id="1",
+        workflow_id="1",
+        user_id="1",
         invoke_from=InvokeFrom.WEB_APP,
         user_from=UserFrom.ACCOUNT,
         config={
-            'id': 'llm',
-            'data': {
-                'title': '123',
-                'type': 'llm',
-                'model': {
-                    'provider': 'openai',
-                    'name': 'gpt-3.5-turbo',
-                    'mode': 'chat',
-                    'completion_params': {}
-                },
-                'prompt_template': [
-                    {
-                        'role': 'system',
-                        'text': 'you are a helpful assistant.\ntoday\'s weather is {{#abc.output#}}.'
-                    },
-                    {
-                        'role': 'user',
-                        'text': '{{#sys.query#}}'
-                    }
+            "id": "llm",
+            "data": {
+                "title": "123",
+                "type": "llm",
+                "model": {"provider": "openai", "name": "gpt-3.5-turbo", "mode": "chat", "completion_params": {}},
+                "prompt_template": [
+                    {"role": "system", "text": "you are a helpful assistant.\ntoday's weather is {{#abc.output#}}."},
+                    {"role": "user", "text": "{{#sys.query#}}"},
                 ],
-                'memory': None,
-                'context': {
-                    'enabled': False
-                },
-                'vision': {
-                    'enabled': False
-                }
-            }
-        }
+                "memory": None,
+                "context": {"enabled": False},
+                "vision": {"enabled": False},
+            },
+        },
     )
     # construct variable pool
-    pool = VariablePool(system_variables={
-        SystemVariableKey.QUERY: 'what\'s the weather today?',
-        SystemVariableKey.FILES: [],
-        SystemVariableKey.CONVERSATION_ID: 'abababa',
-        SystemVariableKey.USER_ID: 'aaa'
-    }, user_inputs={}, environment_variables=[])
-    pool.add(['abc', 'output'], 'sunny')
+    pool = VariablePool(
+        system_variables={
+            SystemVariableKey.QUERY: "what's the weather today?",
+            SystemVariableKey.FILES: [],
+            SystemVariableKey.CONVERSATION_ID: "abababa",
+            SystemVariableKey.USER_ID: "aaa",
+        },
+        user_inputs={},
+        environment_variables=[],
+    )
+    pool.add(["abc", "output"], "sunny")
-    credentials = {
-        'openai_api_key': os.environ.get('OPENAI_API_KEY')
-    }
+    credentials = {"openai_api_key": os.environ.get("OPENAI_API_KEY")}
-    provider_instance = ModelProviderFactory().get_provider_instance('openai')
+    provider_instance = ModelProviderFactory().get_provider_instance("openai")
     model_type_instance = provider_instance.get_model_instance(ModelType.LLM)
     provider_model_bundle = ProviderModelBundle(
         configuration=ProviderConfiguration(
-            tenant_id='1',
+            tenant_id="1",
             provider=provider_instance.get_provider_schema(),
             preferred_provider_type=ProviderType.CUSTOM,
             using_provider_type=ProviderType.CUSTOM,
-            system_configuration=SystemConfiguration(
-                enabled=False
-            ),
-            custom_configuration=CustomConfiguration(
-                provider=CustomProviderConfiguration(
-                    credentials=credentials
-                )
-            ),
-            model_settings=[]
+            system_configuration=SystemConfiguration(enabled=False),
+            custom_configuration=CustomConfiguration(provider=CustomProviderConfiguration(credentials=credentials)),
+            model_settings=[],
         ),
         provider_instance=provider_instance,
-        model_type_instance=model_type_instance
+        model_type_instance=model_type_instance,
     )
-    model_instance = ModelInstance(provider_model_bundle=provider_model_bundle, model='gpt-3.5-turbo')
+    model_instance = ModelInstance(provider_model_bundle=provider_model_bundle, model="gpt-3.5-turbo")
     model_config = ModelConfigWithCredentialsEntity(
-        model='gpt-3.5-turbo',
-        provider='openai',
-        mode='chat',
+        model="gpt-3.5-turbo",
+        provider="openai",
+        mode="chat",
         credentials=credentials,
         parameters={},
-        model_schema=model_type_instance.get_model_schema('gpt-3.5-turbo'),
-        provider_model_bundle=provider_model_bundle
+        model_schema=model_type_instance.get_model_schema("gpt-3.5-turbo"),
+        provider_model_bundle=provider_model_bundle,
     )
     # Mock db.session.close()
@@ -118,112 +99,97 @@ def test_execute_llm(setup_openai_mock):
     result = node.run(pool)
     assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
-    assert result.outputs['text'] is not None
-    assert result.outputs['usage']['total_tokens'] > 0
+    assert result.outputs["text"] is not None
+    assert result.outputs["usage"]["total_tokens"] > 0
-@pytest.mark.parametrize('setup_code_executor_mock', [['none']], indirect=True)
-@pytest.mark.parametrize('setup_openai_mock', [['chat']], indirect=True)
+@pytest.mark.parametrize("setup_code_executor_mock", [["none"]], indirect=True)
+@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True)
 def test_execute_llm_with_jinja2(setup_code_executor_mock, setup_openai_mock):
     """
     Test execute LLM node with jinja2
     """
     node = LLMNode(
-        tenant_id='1',
-        app_id='1',
-        workflow_id='1',
-        user_id='1',
+        tenant_id="1",
+        app_id="1",
+        workflow_id="1",
+        user_id="1",
         invoke_from=InvokeFrom.WEB_APP,
         user_from=UserFrom.ACCOUNT,
         config={
-            'id': 'llm',
-            'data': {
-                'title': '123',
-                'type': 'llm',
-                'model': {
-                    'provider': 'openai',
-                    'name': 'gpt-3.5-turbo',
-                    'mode': 'chat',
-                    'completion_params': {}
+            "id": "llm",
+            "data": {
+                "title": "123",
+                "type": "llm",
+                "model": {"provider": "openai", "name": "gpt-3.5-turbo", "mode": "chat", "completion_params": {}},
+                "prompt_config": {
+                    "jinja2_variables": [
+                        {"variable": "sys_query", "value_selector": ["sys", "query"]},
+                        {"variable": "output", "value_selector": ["abc", "output"]},
+                    ]
                 },
-                'prompt_config': {
-                    'jinja2_variables': [{
-                        'variable': 'sys_query',
-                        'value_selector': ['sys', 'query']
-                    }, {
-                        'variable': 'output',
-                        'value_selector': ['abc', 'output']
-                    }]
-                },
-                'prompt_template': [
+                "prompt_template": [
                     {
-                        'role': 'system',
-                        'text': 'you are a helpful assistant.\ntoday\'s weather is {{#abc.output#}}',
-                        'jinja2_text': 'you are a helpful assistant.\ntoday\'s weather is {{output}}.',
-                        'edition_type': 'jinja2'
+                        "role": "system",
+                        "text": "you are a helpful assistant.\ntoday's weather is {{#abc.output#}}",
+                        "jinja2_text": "you are a helpful assistant.\ntoday's weather is {{output}}.",
+                        "edition_type": "jinja2",
                     },
                     {
-                        'role': 'user',
-                        'text': '{{#sys.query#}}',
-                        'jinja2_text': '{{sys_query}}',
-                        'edition_type': 'basic'
-                    }
+                        "role": "user",
+                        "text": "{{#sys.query#}}",
+                        "jinja2_text": "{{sys_query}}",
+                        "edition_type": "basic",
+                    },
                 ],
-                'memory': None,
-                'context': {
-                    'enabled': False
-                },
-                'vision': {
-                    'enabled': False
-                }
-            }
-        }
+                "memory": None,
+                "context": {"enabled": False},
+                "vision": {"enabled": False},
+            },
+        },
     )
     # construct variable pool
-    pool = VariablePool(system_variables={
-        SystemVariableKey.QUERY: 'what\'s the weather today?',
-        SystemVariableKey.FILES: [],
-        SystemVariableKey.CONVERSATION_ID: 'abababa',
-        SystemVariableKey.USER_ID: 'aaa'
-    }, user_inputs={}, environment_variables=[])
-    pool.add(['abc', 'output'], 'sunny')
+    pool = VariablePool(
+        system_variables={
+            SystemVariableKey.QUERY: "what's the weather today?",
+            SystemVariableKey.FILES: [],
+            SystemVariableKey.CONVERSATION_ID: "abababa",
+            SystemVariableKey.USER_ID: "aaa",
+        },
+        user_inputs={},
+        environment_variables=[],
+    )
+    pool.add(["abc", "output"], "sunny")
-    credentials = {
-        'openai_api_key': os.environ.get('OPENAI_API_KEY')
-    }
+    credentials = {"openai_api_key": os.environ.get("OPENAI_API_KEY")}
-    provider_instance = ModelProviderFactory().get_provider_instance('openai')
+    provider_instance = ModelProviderFactory().get_provider_instance("openai")
     model_type_instance = provider_instance.get_model_instance(ModelType.LLM)
     provider_model_bundle = ProviderModelBundle(
         configuration=ProviderConfiguration(
-            tenant_id='1',
+            tenant_id="1",
             provider=provider_instance.get_provider_schema(),
             preferred_provider_type=ProviderType.CUSTOM,
             using_provider_type=ProviderType.CUSTOM,
-            system_configuration=SystemConfiguration(
-                enabled=False
-            ),
-            custom_configuration=CustomConfiguration(
-                provider=CustomProviderConfiguration(
-                    credentials=credentials
-                )
-            ),
-            model_settings=[]
+            system_configuration=SystemConfiguration(enabled=False),
+            custom_configuration=CustomConfiguration(provider=CustomProviderConfiguration(credentials=credentials)),
+            model_settings=[],
         ),
         provider_instance=provider_instance,
         model_type_instance=model_type_instance,
     )
-    model_instance = ModelInstance(provider_model_bundle=provider_model_bundle, model='gpt-3.5-turbo')
+    model_instance = ModelInstance(provider_model_bundle=provider_model_bundle, model="gpt-3.5-turbo")
     model_config = ModelConfigWithCredentialsEntity(
-        model='gpt-3.5-turbo',
-        provider='openai',
-        mode='chat',
+        model="gpt-3.5-turbo",
+        provider="openai",
+        mode="chat",
         credentials=credentials,
         parameters={},
-        model_schema=model_type_instance.get_model_schema('gpt-3.5-turbo'),
-        provider_model_bundle=provider_model_bundle
+        model_schema=model_type_instance.get_model_schema("gpt-3.5-turbo"),
+        provider_model_bundle=provider_model_bundle,
     )
     # Mock db.session.close()
@@ -235,5 +201,5 @@ def test_execute_llm_with_jinja2(setup_code_executor_mock, setup_openai_mock):
     result = node.run(pool)
     assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
-    assert 'sunny' in json.dumps(result.process_data)
-    assert 'what\'s the weather today?' in json.dumps(result.process_data)
+    assert "sunny" in json.dumps(result.process_data)
+    assert "what's the weather today?" in json.dumps(result.process_data)