chore: apply flake8-comprehensions Ruff rules to improve collection comprehensions (#5652)

Co-authored-by: -LAN- <laipz8200@outlook.com>
Author: Bowen Liang
Date: 2024-06-27 11:21:31 +08:00 (committed by GitHub)
Parent: 2e718b85e9
Commit: dcb72e0067
58 changed files with 123 additions and 136 deletions
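The recurring pattern across these files is the flake8-comprehensions check for an unnecessary literal passed to tuple() (rule C409 under Ruff's "C4" selector, which is presumably how the project enables these checks): tuple([...]) builds a throwaway list and then copies it into a tuple, while a tuple literal produces the same value directly. A minimal sketch of the before/after, with hypothetical placeholder values:

# Before: C409 -- tuple() wraps a list literal, allocating a list only to copy it.
pair = tuple([1, "a"])

# After: the tuple literal yields the identical value in one step.
pair = (1, "a")

assert tuple([1, "a"]) == (1, "a")  # same result, one less intermediate allocation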


@@ -112,7 +112,7 @@ def test_execute_llm(setup_openai_mock):
     # Mock db.session.close()
     db.session.close = MagicMock()
-    node._fetch_model_config = MagicMock(return_value=tuple([model_instance, model_config]))
+    node._fetch_model_config = MagicMock(return_value=(model_instance, model_config))
     # execute node
     result = node.run(pool)

@@ -229,7 +229,7 @@ def test_execute_llm_with_jinja2(setup_code_executor_mock, setup_openai_mock):
     # Mock db.session.close()
     db.session.close = MagicMock()
-    node._fetch_model_config = MagicMock(return_value=tuple([model_instance, model_config]))
+    node._fetch_model_config = MagicMock(return_value=(model_instance, model_config))
     # execute node
     result = node.run(pool)


@@ -59,7 +59,7 @@ def get_mocked_fetch_model_config(
         provider_model_bundle=provider_model_bundle
     )
-    return MagicMock(return_value=tuple([model_instance, model_config]))
+    return MagicMock(return_value=(model_instance, model_config))

 @pytest.mark.parametrize('setup_openai_mock', [['chat']], indirect=True)
 def test_function_calling_parameter_extractor(setup_openai_mock):


@@ -238,8 +238,8 @@ def test__get_completion_model_prompt_messages():
     prompt_rules = prompt_template['prompt_rules']
     full_inputs = {**inputs, '#context#': context, '#query#': query, '#histories#': memory.get_history_prompt_text(
         max_token_limit=2000,
-        human_prefix=prompt_rules['human_prefix'] if 'human_prefix' in prompt_rules else 'Human',
-        ai_prefix=prompt_rules['assistant_prefix'] if 'assistant_prefix' in prompt_rules else 'Assistant'
+        human_prefix=prompt_rules.get("human_prefix", "Human"),
+        ai_prefix=prompt_rules.get("assistant_prefix", "Assistant")
     )}
     real_prompt = prompt_template['prompt_template'].format(full_inputs)
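This last hunk is a slightly different cleanup: the conditional-expression lookup is replaced with dict.get and a default, which does a single lookup instead of a membership test followed by an indexing (rule-wise this resembles a flake8-simplify check such as SIM401 rather than a C4 comprehension rule). A small self-contained sketch of the equivalence, using a hypothetical rules dict:

prompt_rules = {"human_prefix": "User"}  # hypothetical; no assistant_prefix key present

# Before: membership test plus indexing, two dict operations on the hit path.
human = prompt_rules["human_prefix"] if "human_prefix" in prompt_rules else "Human"
ai = prompt_rules["assistant_prefix"] if "assistant_prefix" in prompt_rules else "Assistant"

# After: dict.get returns the stored value when the key exists, the default otherwise.
assert prompt_rules.get("human_prefix", "Human") == human == "User"
assert prompt_rules.get("assistant_prefix", "Assistant") == ai == "Assistant"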