chore: apply ruff's pyflakes linter rules (#2420)
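Every hunk below makes the same mechanical fix: the `f` prefix is dropped from string literals that contain no `{...}` placeholders, which is what pyflakes (surfaced by ruff as rule F541, "f-string without any placeholders") reports as redundant. A minimal sketch of the pattern, using hypothetical `greet_before`/`greet_after` functions rather than code from this repository:

# Hedged sketch of the F541 pattern; the function names are illustrative only.
def greet_before(name: str) -> None:
    print(f"Hello:\n")        # F541: no placeholder, the f prefix does nothing
    print(f"\t{name}\n")      # fine: interpolation is actually used

def greet_after(name: str) -> None:
    print("Hello:\n")         # fixed: plain string literal, same value
    print(f"\t{name}\n")      # unchanged, placeholder still needed

The fix is behavior-preserving: an f-string with no placeholders evaluates to the same value as the plain literal, so only the redundant prefix is removed.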
@@ -30,7 +30,7 @@ class LoggingCallback(Callback):
         """
         self.print_text("\n[on_llm_before_invoke]\n", color='blue')
         self.print_text(f"Model: {model}\n", color='blue')
-        self.print_text(f"Parameters:\n", color='blue')
+        self.print_text("Parameters:\n", color='blue')
         for key, value in model_parameters.items():
             self.print_text(f"\t{key}: {value}\n", color='blue')

@@ -38,7 +38,7 @@ class LoggingCallback(Callback):
         self.print_text(f"\tstop: {stop}\n", color='blue')

         if tools:
-            self.print_text(f"\tTools:\n", color='blue')
+            self.print_text("\tTools:\n", color='blue')
             for tool in tools:
                 self.print_text(f"\t\t{tool.name}\n", color='blue')

@@ -47,7 +47,7 @@ class LoggingCallback(Callback):
         if user:
             self.print_text(f"User: {user}\n", color='blue')

-        self.print_text(f"Prompt messages:\n", color='blue')
+        self.print_text("Prompt messages:\n", color='blue')
         for prompt_message in prompt_messages:
             if prompt_message.name:
                 self.print_text(f"\tname: {prompt_message.name}\n", color='blue')
@@ -101,7 +101,7 @@ class LoggingCallback(Callback):
         self.print_text(f"Content: {result.message.content}\n", color='yellow')

         if result.message.tool_calls:
-            self.print_text(f"Tool calls:\n", color='yellow')
+            self.print_text("Tool calls:\n", color='yellow')
             for tool_call in result.message.tool_calls:
                 self.print_text(f"\t{tool_call.id}\n", color='yellow')
                 self.print_text(f"\t{tool_call.function.name}\n", color='yellow')
@@ -110,7 +110,7 @@ class BaichuanLarguageModel(LargeLanguageModel):
                   stop: List[str] | None = None, stream: bool = True, user: str | None = None) \
             -> LLMResult | Generator:
         if tools is not None and len(tools) > 0:
-            raise InvokeBadRequestError(f"Baichuan model doesn't support tools")
+            raise InvokeBadRequestError("Baichuan model doesn't support tools")

         instance = BaichuanModel(
             api_key=credentials['api_key'],
@@ -146,16 +146,16 @@ class OAIAPICompatLargeLanguageModel(_CommonOAI_API_Compat, LargeLanguageModel):
             try:
                 json_result = response.json()
             except json.JSONDecodeError as e:
-                raise CredentialsValidateFailedError(f'Credentials validation failed: JSON decode error')
+                raise CredentialsValidateFailedError('Credentials validation failed: JSON decode error')

             if (completion_type is LLMMode.CHAT
                     and ('object' not in json_result or json_result['object'] != 'chat.completion')):
                 raise CredentialsValidateFailedError(
-                    f'Credentials validation failed: invalid response object, must be \'chat.completion\'')
+                    'Credentials validation failed: invalid response object, must be \'chat.completion\'')
             elif (completion_type is LLMMode.COMPLETION
                     and ('object' not in json_result or json_result['object'] != 'text_completion')):
                 raise CredentialsValidateFailedError(
-                    f'Credentials validation failed: invalid response object, must be \'text_completion\'')
+                    'Credentials validation failed: invalid response object, must be \'text_completion\'')
         except CredentialsValidateFailedError:
             raise
         except Exception as ex:
@@ -179,11 +179,11 @@ class OAICompatEmbeddingModel(_CommonOAI_API_Compat, TextEmbeddingModel):
             try:
                 json_result = response.json()
             except json.JSONDecodeError as e:
-                raise CredentialsValidateFailedError(f'Credentials validation failed: JSON decode error')
+                raise CredentialsValidateFailedError('Credentials validation failed: JSON decode error')

             if 'model' not in json_result:
                 raise CredentialsValidateFailedError(
-                    f'Credentials validation failed: invalid response')
+                    'Credentials validation failed: invalid response')
         except CredentialsValidateFailedError:
             raise
         except Exception as ex:
@@ -231,15 +231,15 @@ class ErnieBotModel(object):
         # so, we just disable function calling for now.

         if tools is not None and len(tools) > 0:
-            raise BadRequestError(f'function calling is not supported yet.')
+            raise BadRequestError('function calling is not supported yet.')

         if stop is not None:
             if len(stop) > 4:
-                raise BadRequestError(f'stop list should not exceed 4 items.')
+                raise BadRequestError('stop list should not exceed 4 items.')

             for s in stop:
                 if len(s) > 20:
-                    raise BadRequestError(f'stop item should not exceed 20 characters.')
+                    raise BadRequestError('stop item should not exceed 20 characters.')

     def _build_request_body(self, model: str, messages: List[ErnieMessage], stream: bool, parameters: Dict[str, Any],
                             tools: List[PromptMessageTool], stop: List[str], user: str) -> Dict[str, Any]:
@@ -252,9 +252,9 @@ class ErnieBotModel(object):
                                              stop: List[str], user: str) \
         -> Dict[str, Any]:
         if len(messages) % 2 == 0:
-            raise BadRequestError(f'The number of messages should be odd.')
+            raise BadRequestError('The number of messages should be odd.')
         if messages[0].role == 'function':
-            raise BadRequestError(f'The first message should be user message.')
+            raise BadRequestError('The first message should be user message.')

         """
         TODO: implement function calling
@@ -264,7 +264,7 @@ class ErnieBotModel(object):
                                  parameters: Dict[str, Any], stop: List[str], user: str) \
         -> Dict[str, Any]:
         if len(messages) == 0:
-            raise BadRequestError(f'The number of messages should not be zero.')
+            raise BadRequestError('The number of messages should not be zero.')

         # check if the first element is system, shift it
         system_message = ''
@@ -273,9 +273,9 @@ class ErnieBotModel(object):
                 system_message = message.content

         if len(messages) % 2 == 0:
-            raise BadRequestError(f'The number of messages should be odd.')
+            raise BadRequestError('The number of messages should be odd.')
         if messages[0].role != 'user':
-            raise BadRequestError(f'The first message should be user message.')
+            raise BadRequestError('The first message should be user message.')
         body = {
             'messages': [message.to_dict() for message in messages],
             'stream': stream,
@@ -37,7 +37,7 @@ class ZhipuAI(HttpClient):
         if base_url is None:
             base_url = os.environ.get("ZHIPUAI_BASE_URL")
         if base_url is None:
-            base_url = f"https://open.bigmodel.cn/api/paas/v4"
+            base_url = "https://open.bigmodel.cn/api/paas/v4"
         from .__version__ import __version__
         super().__init__(
             version=__version__,
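For reference, this class of fix is one ruff can apply automatically. A sketch, assuming ruff is installed and runnable from the repository root; the exact command used for this commit is not recorded in the diff:

# Hedged sketch: run ruff's pyflakes ("F") rule set with auto-fix over the repo.
# Assumes the `ruff` executable is on PATH; adjust the path argument as needed.
import subprocess

subprocess.run(["ruff", "check", "--select", "F", "--fix", "."], check=True)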