chore: refurbish Python code by applying refurb linter rules (#8296)

Bowen Liang
2024-09-12 15:50:49 +08:00
committed by GitHub
parent c69f5b07ba
commit 40fb4d16ef
105 changed files with 220 additions and 276 deletions

View File

@@ -449,7 +449,7 @@ if you are not sure about the structure.
model=real_model,
prompt_messages=prompt_messages,
message=prompt_message,
-usage=usage if usage else LLMUsage.empty_usage(),
+usage=usage or LLMUsage.empty_usage(),
system_fingerprint=system_fingerprint,
),
credentials=credentials,
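
Note: most hunks in this commit are refurb's use-or rule (FURB110). "x if x else y" and "x or y" are exactly equivalent, since both take the fallback for any falsy x, so the rewrite is behavior-preserving. A minimal runnable sketch, with a hypothetical Usage class standing in for LLMUsage:

# FURB110 sketch: `x if x else y` is the same as `x or y`.
# Usage is a hypothetical stand-in for the LLMUsage class above.
from dataclasses import dataclass
from typing import Optional

@dataclass
class Usage:
    tokens: int = 0

    @classmethod
    def empty_usage(cls) -> "Usage":
        return cls(tokens=0)

def pick_usage(usage: Optional[Usage]) -> Usage:
    # Before: usage if usage else Usage.empty_usage()
    # After:  usage or Usage.empty_usage()
    # Caution: like the ternary it replaces, `or` also swaps out falsy
    # values such as "" or 0; it is not an `is not None` check.
    return usage or Usage.empty_usage()

assert pick_usage(None) == Usage.empty_usage()
assert pick_usage(Usage(tokens=7)) == Usage(tokens=7)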

View File

@@ -409,7 +409,7 @@ class AnthropicLargeLanguageModel(LargeLanguageModel):
),
)
elif isinstance(chunk, ContentBlockDeltaEvent):
-chunk_text = chunk.delta.text if chunk.delta.text else ""
+chunk_text = chunk.delta.text or ""
full_assistant_content += chunk_text
# transform assistant message to prompt message

View File

@@ -213,7 +213,7 @@ class AzureAIStudioLargeLanguageModel(LargeLanguageModel):
model=real_model,
prompt_messages=prompt_messages,
message=prompt_message,
-usage=usage if usage else LLMUsage.empty_usage(),
+usage=usage or LLMUsage.empty_usage(),
system_fingerprint=system_fingerprint,
),
credentials=credentials,

View File

@@ -225,7 +225,7 @@ class AzureOpenAILargeLanguageModel(_CommonAzureOpenAI, LargeLanguageModel):
continue
# transform assistant message to prompt message
-text = delta.text if delta.text else ""
+text = delta.text or ""
assistant_prompt_message = AssistantPromptMessage(content=text)
full_text += text
@@ -400,15 +400,13 @@ class AzureOpenAILargeLanguageModel(_CommonAzureOpenAI, LargeLanguageModel):
continue
# transform assistant message to prompt message
-assistant_prompt_message = AssistantPromptMessage(
-content=delta.delta.content if delta.delta.content else "", tool_calls=tool_calls
-)
+assistant_prompt_message = AssistantPromptMessage(content=delta.delta.content or "", tool_calls=tool_calls)
-full_assistant_content += delta.delta.content if delta.delta.content else ""
+full_assistant_content += delta.delta.content or ""
real_model = chunk.model
system_fingerprint = chunk.system_fingerprint
-completion += delta.delta.content if delta.delta.content else ""
+completion += delta.delta.content or ""
yield LLMResultChunk(
model=real_model,

View File

@@ -84,7 +84,7 @@ class AzureOpenAIText2SpeechModel(_CommonAzureOpenAI, TTSModel):
)
for i in range(len(sentences))
]
-for index, future in enumerate(futures):
+for future in futures:
yield from future.result().__enter__().iter_bytes(1024)
else:
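
The TTS providers here and below iterated enumerate(futures) without ever reading the index; refurb's FURB148 drops the unused tuple element. A sketch with concurrent.futures, assuming the chunks must come back in sentence order (which iterating the futures list, unlike as_completed, preserves):

# FURB148 sketch: drop enumerate() when the index is unused.
from concurrent.futures import ThreadPoolExecutor

def synthesize(sentence: str) -> bytes:
    return sentence.encode()  # placeholder for a real TTS request

sentences = ["first chunk", "second chunk"]
with ThreadPoolExecutor(max_workers=2) as executor:
    futures = [executor.submit(synthesize, s) for s in sentences]
    # Before: for index, future in enumerate(futures):
    # After: iterate directly; submission order is kept, so audio
    # chunks are still yielded in sentence order.
    for future in futures:
        print(future.result())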

View File

@@ -331,10 +331,10 @@ class BedrockLargeLanguageModel(LargeLanguageModel):
elif "contentBlockDelta" in chunk:
delta = chunk["contentBlockDelta"]["delta"]
if "text" in delta:
chunk_text = delta["text"] if delta["text"] else ""
chunk_text = delta["text"] or ""
full_assistant_content += chunk_text
assistant_prompt_message = AssistantPromptMessage(
-content=chunk_text if chunk_text else "",
+content=chunk_text or "",
)
index = chunk["contentBlockDelta"]["contentBlockIndex"]
yield LLMResultChunk(
@@ -751,7 +751,7 @@ class BedrockLargeLanguageModel(LargeLanguageModel):
elif model_prefix == "cohere":
output = response_body.get("generations")[0].get("text")
prompt_tokens = self.get_num_tokens(model, credentials, prompt_messages)
-completion_tokens = self.get_num_tokens(model, credentials, output if output else "")
+completion_tokens = self.get_num_tokens(model, credentials, output or "")
else:
raise ValueError(f"Got unknown model prefix {model_prefix} when handling block response")
@@ -828,7 +828,7 @@ class BedrockLargeLanguageModel(LargeLanguageModel):
# transform assistant message to prompt message
assistant_prompt_message = AssistantPromptMessage(
-content=content_delta if content_delta else "",
+content=content_delta or "",
)
index += 1

View File

@@ -302,11 +302,11 @@ class ChatGLMLargeLanguageModel(LargeLanguageModel):
if delta.delta.function_call:
function_calls = [delta.delta.function_call]
-assistant_message_tool_calls = self._extract_response_tool_calls(function_calls if function_calls else [])
+assistant_message_tool_calls = self._extract_response_tool_calls(function_calls or [])
# transform assistant message to prompt message
assistant_prompt_message = AssistantPromptMessage(
-content=delta.delta.content if delta.delta.content else "", tool_calls=assistant_message_tool_calls
+content=delta.delta.content or "", tool_calls=assistant_message_tool_calls
)
if delta.finish_reason is not None:

View File

@@ -511,7 +511,7 @@ class LocalAILanguageModel(LargeLanguageModel):
delta = chunk.choices[0]
# transform assistant message to prompt message
-assistant_prompt_message = AssistantPromptMessage(content=delta.text if delta.text else "", tool_calls=[])
+assistant_prompt_message = AssistantPromptMessage(content=delta.text or "", tool_calls=[])
if delta.finish_reason is not None:
# temp_assistant_prompt_message is used to calculate usage
@@ -578,11 +578,11 @@ class LocalAILanguageModel(LargeLanguageModel):
if delta.delta.function_call:
function_calls = [delta.delta.function_call]
-assistant_message_tool_calls = self._extract_response_tool_calls(function_calls if function_calls else [])
+assistant_message_tool_calls = self._extract_response_tool_calls(function_calls or [])
# transform assistant message to prompt message
assistant_prompt_message = AssistantPromptMessage(
-content=delta.delta.content if delta.delta.content else "", tool_calls=assistant_message_tool_calls
+content=delta.delta.content or "", tool_calls=assistant_message_tool_calls
)
if delta.finish_reason is not None:

View File

@@ -211,7 +211,7 @@ class MinimaxLargeLanguageModel(LargeLanguageModel):
index=0,
message=AssistantPromptMessage(content=message.content, tool_calls=[]),
usage=usage,
-finish_reason=message.stop_reason if message.stop_reason else None,
+finish_reason=message.stop_reason or None,
),
)
elif message.function_call:
@@ -244,7 +244,7 @@ class MinimaxLargeLanguageModel(LargeLanguageModel):
delta=LLMResultChunkDelta(
index=0,
message=AssistantPromptMessage(content=message.content, tool_calls=[]),
-finish_reason=message.stop_reason if message.stop_reason else None,
+finish_reason=message.stop_reason or None,
),
)

View File

@@ -65,7 +65,7 @@ class OllamaEmbeddingModel(TextEmbeddingModel):
inputs = []
used_tokens = 0
-for i, text in enumerate(texts):
+for text in texts:
# Here token count is only an approximation based on the GPT2 tokenizer
num_tokens = self._get_num_tokens_by_gpt2(text)

View File

@@ -508,7 +508,7 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel):
continue
# transform assistant message to prompt message
-text = delta.text if delta.text else ""
+text = delta.text or ""
assistant_prompt_message = AssistantPromptMessage(content=text)
full_text += text
@@ -760,11 +760,9 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel):
final_tool_calls.extend(tool_calls)
# transform assistant message to prompt message
-assistant_prompt_message = AssistantPromptMessage(
-content=delta.delta.content if delta.delta.content else "", tool_calls=tool_calls
-)
+assistant_prompt_message = AssistantPromptMessage(content=delta.delta.content or "", tool_calls=tool_calls)
-full_assistant_content += delta.delta.content if delta.delta.content else ""
+full_assistant_content += delta.delta.content or ""
if has_finish_reason:
final_chunk = LLMResultChunk(

View File

@@ -88,7 +88,7 @@ class OpenAIText2SpeechModel(_CommonOpenAI, TTSModel):
)
for i in range(len(sentences))
]
-for index, future in enumerate(futures):
+for future in futures:
yield from future.result().__enter__().iter_bytes(1024)
else:

View File

@@ -179,9 +179,9 @@ class OAIAPICompatLargeLanguageModel(_CommonOaiApiCompat, LargeLanguageModel):
features = []
function_calling_type = credentials.get("function_calling_type", "no_call")
-if function_calling_type in ["function_call"]:
+if function_calling_type == "function_call":
features.append(ModelFeature.TOOL_CALL)
-elif function_calling_type in ["tool_call"]:
+elif function_calling_type == "tool_call":
features.append(ModelFeature.MULTI_TOOL_CALL)
stream_function_calling = credentials.get("stream_function_calling", "supported")
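
Testing membership in a single-item list builds a list just to do one equality check; refurb's FURB171 rewrites it to ==. A genuine multi-value check is still idiomatic as a membership test; a sketch (the second branch uses hypothetical values for illustration):

# FURB171 sketch: single-item membership test becomes equality.
function_calling_type = "tool_call"

# Before: if function_calling_type in ["function_call"]:
if function_calling_type == "function_call":
    print("function call")
# A real multi-value check still reads well as membership (hypothetical values):
elif function_calling_type in ("tool_call", "stream_tool_call"):
    print("tool call")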

View File

@@ -179,7 +179,7 @@ class OpenLLMLargeLanguageModel(LargeLanguageModel):
index=0,
message=AssistantPromptMessage(content=message.content, tool_calls=[]),
usage=usage,
-finish_reason=message.stop_reason if message.stop_reason else None,
+finish_reason=message.stop_reason or None,
),
)
else:
@@ -189,7 +189,7 @@ class OpenLLMLargeLanguageModel(LargeLanguageModel):
delta=LLMResultChunkDelta(
index=0,
message=AssistantPromptMessage(content=message.content, tool_calls=[]),
-finish_reason=message.stop_reason if message.stop_reason else None,
+finish_reason=message.stop_reason or None,
),
)

View File

@@ -106,7 +106,7 @@ class OpenLLMGenerate:
timeout = 120
data = {
"stop": stop if stop else [],
"stop": stop or [],
"prompt": "\n".join([message.content for message in prompt_messages]),
"llm_config": default_llm_config,
}

View File

@@ -214,7 +214,7 @@ class ReplicateLargeLanguageModel(_CommonReplicate, LargeLanguageModel):
index += 1
-assistant_prompt_message = AssistantPromptMessage(content=output if output else "")
+assistant_prompt_message = AssistantPromptMessage(content=output or "")
if index < prediction_output_length:
yield LLMResultChunk(

View File

@@ -1,5 +1,6 @@
import json
import logging
+import operator
from typing import Any, Optional
import boto3
@@ -94,7 +95,7 @@ class SageMakerRerankModel(RerankModel):
for idx in range(len(scores)):
candidate_docs.append({"content": docs[idx], "score": scores[idx]})
-sorted(candidate_docs, key=lambda x: x["score"], reverse=True)
+sorted(candidate_docs, key=operator.itemgetter("score"), reverse=True)
line = 3
rerank_documents = []
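
operator.itemgetter replaces a lambda that only subscripts its argument (refurb FURB118), and the lookup runs in C. Worth noting that sorted() returns a new list rather than sorting in place, so its result needs to be bound to take effect. A sketch:

# FURB118 sketch: operator.itemgetter instead of a subscripting lambda.
import operator

candidate_docs = [
    {"content": "doc a", "score": 0.2},
    {"content": "doc b", "score": 0.9},
]

# Before: sorted(candidate_docs, key=lambda x: x["score"], reverse=True)
ranked = sorted(candidate_docs, key=operator.itemgetter("score"), reverse=True)
assert ranked[0]["content"] == "doc b"

# itemgetter also takes positional indices, as in the signature hunk below:
params = {"b": 2, "a": 1}
assert sorted(params.items(), key=operator.itemgetter(0)) == [("a", 1), ("b", 2)]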

View File

@@ -260,7 +260,7 @@ class SageMakerText2SpeechModel(TTSModel):
for payload in payloads
]
-for index, future in enumerate(futures):
+for future in futures:
resp = future.result()
audio_bytes = requests.get(resp.get("s3_presign_url")).content
for i in range(0, len(audio_bytes), 1024):

View File

@@ -220,7 +220,7 @@ class SparkLargeLanguageModel(LargeLanguageModel):
delta = content
assistant_prompt_message = AssistantPromptMessage(
-content=delta if delta else "",
+content=delta or "",
)
prompt_tokens = self.get_num_tokens(model, credentials, prompt_messages)

View File

@@ -1,6 +1,7 @@
import base64
import hashlib
import hmac
+import operator
import time
import requests
@@ -127,7 +128,7 @@ class FlashRecognizer:
return s
def _build_req_with_signature(self, secret_key, params, header):
-query = sorted(params.items(), key=lambda d: d[0])
+query = sorted(params.items(), key=operator.itemgetter(0))
signstr = self._format_sign_string(query)
signature = self._sign(signstr, secret_key)
header["Authorization"] = signature

View File

@@ -4,6 +4,7 @@ import tempfile
import uuid
from collections.abc import Generator
from http import HTTPStatus
+from pathlib import Path
from typing import Optional, Union, cast
from dashscope import Generation, MultiModalConversation, get_tokenizer
@@ -454,8 +455,7 @@ class TongyiLargeLanguageModel(LargeLanguageModel):
file_path = os.path.join(temp_dir, f"{uuid.uuid4()}.{mime_type.split('/')[1]}")
-with open(file_path, "wb") as image_file:
-image_file.write(base64.b64decode(encoded_string))
+Path(file_path).write_bytes(base64.b64decode(encoded_string))
return f"file://{file_path}"

View File

@@ -368,11 +368,9 @@ class UpstageLargeLanguageModel(_CommonUpstage, LargeLanguageModel):
final_tool_calls.extend(tool_calls)
# transform assistant message to prompt message
-assistant_prompt_message = AssistantPromptMessage(
-content=delta.delta.content if delta.delta.content else "", tool_calls=tool_calls
-)
+assistant_prompt_message = AssistantPromptMessage(content=delta.delta.content or "", tool_calls=tool_calls)
-full_assistant_content += delta.delta.content if delta.delta.content else ""
+full_assistant_content += delta.delta.content or ""
if has_finish_reason:
final_chunk = LLMResultChunk(

View File

@@ -231,10 +231,10 @@ class VertexAiLargeLanguageModel(LargeLanguageModel):
),
)
elif isinstance(chunk, ContentBlockDeltaEvent):
-chunk_text = chunk.delta.text if chunk.delta.text else ""
+chunk_text = chunk.delta.text or ""
full_assistant_content += chunk_text
assistant_prompt_message = AssistantPromptMessage(
-content=chunk_text if chunk_text else "",
+content=chunk_text or "",
)
index = chunk.index
yield LLMResultChunk(

View File

@@ -1,5 +1,6 @@
# coding : utf-8
import datetime
+from itertools import starmap
import pytz
@@ -48,7 +49,7 @@ class SignResult:
self.authorization = ""
def __str__(self):
return "\n".join(["{}:{}".format(*item) for item in self.__dict__.items()])
return "\n".join(list(starmap("{}:{}".format, self.__dict__.items())))
class Credentials:
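
refurb's FURB140 turns a comprehension that unpacks 2-tuples into itertools.starmap. The output is identical; the list(...) wrapper kept in the new line is redundant inside join, which accepts any iterable. A sketch with sample data:

# FURB140 sketch: itertools.starmap instead of unpacking in a comprehension.
from itertools import starmap

fields = {"region": "ap-guangzhou", "service": "asr"}  # sample data

before = "\n".join(["{}:{}".format(*item) for item in fields.items()])
after = "\n".join(starmap("{}:{}".format, fields.items()))
assert before == after  # "region:ap-guangzhou\nservice:asr"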

View File

@@ -1,5 +1,6 @@
import hashlib
import hmac
+import operator
from functools import reduce
from urllib.parse import quote
@@ -40,4 +41,4 @@ class Util:
if len(hv) == 1:
hv = "0" + hv
lst.append(hv)
-return reduce(lambda x, y: x + y, lst)
+return reduce(operator.add, lst)
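
FURB118 again: operator.add replaces the addition lambda. For concatenating a list of hex strings specifically, str.join would arguably be the clearer and linear-time choice, though the commit keeps reduce. A sketch:

# FURB118 sketch: operator.add replaces `lambda x, y: x + y` in reduce().
import operator
from functools import reduce

lst = ["0a", "ff", "3c"]

# Before: reduce(lambda x, y: x + y, lst)
assert reduce(operator.add, lst) == "0aff3c"
# For strings, str.join is the idiomatic linear-time alternative:
assert "".join(lst) == "0aff3c"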

View File

@@ -174,9 +174,7 @@ class VolcengineMaaSLargeLanguageModel(LargeLanguageModel):
prompt_messages=prompt_messages,
delta=LLMResultChunkDelta(
index=index,
-message=AssistantPromptMessage(
-content=message["content"] if message["content"] else "", tool_calls=[]
-),
+message=AssistantPromptMessage(content=message["content"] or "", tool_calls=[]),
usage=usage,
finish_reason=choice.get("finish_reason"),
),
@@ -208,7 +206,7 @@ class VolcengineMaaSLargeLanguageModel(LargeLanguageModel):
model=model,
prompt_messages=prompt_messages,
message=AssistantPromptMessage(
content=message["content"] if message["content"] else "",
content=message["content"] or "",
tool_calls=tool_calls,
),
usage=self._calc_response_usage(
@@ -284,7 +282,7 @@ class VolcengineMaaSLargeLanguageModel(LargeLanguageModel):
model=model,
prompt_messages=prompt_messages,
message=AssistantPromptMessage(
-content=message.content if message.content else "",
+content=message.content or "",
tool_calls=tool_calls,
),
usage=self._calc_response_usage(

View File

@@ -199,7 +199,7 @@ class ErnieBotLargeLanguageModel(LargeLanguageModel):
secret_key=credentials["secret_key"],
)
-user = user if user else "ErnieBotDefault"
+user = user or "ErnieBotDefault"
# convert prompt messages to baichuan messages
messages = [
@@ -289,7 +289,7 @@ class ErnieBotLargeLanguageModel(LargeLanguageModel):
index=0,
message=AssistantPromptMessage(content=message.content, tool_calls=[]),
usage=usage,
-finish_reason=message.stop_reason if message.stop_reason else None,
+finish_reason=message.stop_reason or None,
),
)
else:
@@ -299,7 +299,7 @@ class ErnieBotLargeLanguageModel(LargeLanguageModel):
delta=LLMResultChunkDelta(
index=0,
message=AssistantPromptMessage(content=message.content, tool_calls=[]),
-finish_reason=message.stop_reason if message.stop_reason else None,
+finish_reason=message.stop_reason or None,
),
)

View File

@@ -85,7 +85,7 @@ class WenxinTextEmbeddingModel(TextEmbeddingModel):
api_key = credentials["api_key"]
secret_key = credentials["secret_key"]
embedding: TextEmbedding = self._create_text_embedding(api_key, secret_key)
-user = user if user else "ErnieBotDefault"
+user = user or "ErnieBotDefault"
context_size = self._get_context_size(model, credentials)
max_chunks = self._get_max_chunks(model, credentials)

View File

@@ -589,7 +589,7 @@ class XinferenceAILargeLanguageModel(LargeLanguageModel):
# convert tool call to assistant message tool call
tool_calls = assistant_message.tool_calls
-assistant_prompt_message_tool_calls = self._extract_response_tool_calls(tool_calls if tool_calls else [])
+assistant_prompt_message_tool_calls = self._extract_response_tool_calls(tool_calls or [])
function_call = assistant_message.function_call
if function_call:
assistant_prompt_message_tool_calls += [self._extract_response_function_call(function_call)]
@@ -652,7 +652,7 @@ class XinferenceAILargeLanguageModel(LargeLanguageModel):
# transform assistant message to prompt message
assistant_prompt_message = AssistantPromptMessage(
-content=delta.delta.content if delta.delta.content else "", tool_calls=assistant_message_tool_calls
+content=delta.delta.content or "", tool_calls=assistant_message_tool_calls
)
if delta.finish_reason is not None:
@@ -749,7 +749,7 @@ class XinferenceAILargeLanguageModel(LargeLanguageModel):
delta = chunk.choices[0]
# transform assistant message to prompt message
-assistant_prompt_message = AssistantPromptMessage(content=delta.text if delta.text else "", tool_calls=[])
+assistant_prompt_message = AssistantPromptMessage(content=delta.text or "", tool_calls=[])
if delta.finish_reason is not None:
# temp_assistant_prompt_message is used to calculate usage

View File

@@ -215,7 +215,7 @@ class XinferenceText2SpeechModel(TTSModel):
for i in range(len(sentences))
]
-for index, future in enumerate(futures):
+for future in futures:
response = future.result()
for i in range(0, len(response), 1024):
yield response[i : i + 1024]

View File

@@ -414,10 +414,10 @@ class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):
# transform assistant message to prompt message
assistant_prompt_message = AssistantPromptMessage(
-content=delta.delta.content if delta.delta.content else "", tool_calls=assistant_tool_calls
+content=delta.delta.content or "", tool_calls=assistant_tool_calls
)
-full_assistant_content += delta.delta.content if delta.delta.content else ""
+full_assistant_content += delta.delta.content or ""
if delta.finish_reason is not None and chunk.usage is not None:
completion_tokens = chunk.usage.completion_tokens

View File

@@ -30,6 +30,8 @@ def _merge_map(map1: Mapping, map2: Mapping) -> Mapping:
return {key: val for key, val in merged.items() if val is not None}
+from itertools import starmap
from httpx._config import DEFAULT_TIMEOUT_CONFIG as HTTPX_DEFAULT_TIMEOUT
ZHIPUAI_DEFAULT_TIMEOUT = httpx.Timeout(timeout=300.0, connect=8.0)
@@ -159,7 +161,7 @@ class HttpClient:
return [(key, str_data)]
def _make_multipartform(self, data: Mapping[object, object]) -> dict[str, object]:
-items = flatten([self._object_to_formdata(k, v) for k, v in data.items()])
+items = flatten(list(starmap(self._object_to_formdata, data.items())))
serialized: dict[str, object] = {}
for key, value in items: