chore(api/core): apply ruff reformatting (#7624)

Bowen Liang authored 2024-09-10 17:00:20 +08:00, committed by GitHub
parent 178730266d
commit 2cf1187b32
724 changed files with 21180 additions and 21123 deletions
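
The hunks below are mechanical style changes from ruff's Black-compatible formatter: short hand-wrapped calls collapse onto one line, long signatures are exploded to one parameter per line with a trailing comma, and single-quoted strings are normalized to double quotes. A minimal before/after sketch of that pattern (the names below are invented for illustration and do not appear in this commit):

# Hypothetical snippet illustrating the reformatting this commit applies;
# convert(), config_dict, and args are made-up names, not Dify code.

def convert(config: dict) -> dict:
    return dict(config)

config_dict = {"provider": "openai"}
args = {"query": "hello"}

# Before ruff format (the style of the removed lines in the hunks below):
#     result = convert(
#         config=config_dict
#     )
#     query = args.get('query')

# After ruff format: the call fits the line-length limit, so it collapses
# to one line, and string quotes are normalized to double quotes.
result = convert(config=config_dict)
query = args.get("query")
print(result, query)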

View File

@@ -22,15 +22,19 @@ class ChatAppConfig(EasyUIBasedAppConfig):
     """
     Chatbot App Config Entity.
     """
+
     pass


 class ChatAppConfigManager(BaseAppConfigManager):
     @classmethod
-    def get_app_config(cls, app_model: App,
-                       app_model_config: AppModelConfig,
-                       conversation: Optional[Conversation] = None,
-                       override_config_dict: Optional[dict] = None) -> ChatAppConfig:
+    def get_app_config(
+        cls,
+        app_model: App,
+        app_model_config: AppModelConfig,
+        conversation: Optional[Conversation] = None,
+        override_config_dict: Optional[dict] = None,
+    ) -> ChatAppConfig:
         """
         Convert app model config to chat app config
         :param app_model: app model
@@ -51,7 +55,7 @@ class ChatAppConfigManager(BaseAppConfigManager):
             config_dict = app_model_config_dict.copy()
         else:
             if not override_config_dict:
-                raise Exception('override_config_dict is required when config_from is ARGS')
+                raise Exception("override_config_dict is required when config_from is ARGS")

             config_dict = override_config_dict

@@ -63,19 +67,11 @@ class ChatAppConfigManager(BaseAppConfigManager):
             app_model_config_from=config_from,
             app_model_config_id=app_model_config.id,
             app_model_config_dict=config_dict,
-            model=ModelConfigManager.convert(
-                config=config_dict
-            ),
-            prompt_template=PromptTemplateConfigManager.convert(
-                config=config_dict
-            ),
-            sensitive_word_avoidance=SensitiveWordAvoidanceConfigManager.convert(
-                config=config_dict
-            ),
-            dataset=DatasetConfigManager.convert(
-                config=config_dict
-            ),
-            additional_features=cls.convert_features(config_dict, app_mode)
+            model=ModelConfigManager.convert(config=config_dict),
+            prompt_template=PromptTemplateConfigManager.convert(config=config_dict),
+            sensitive_word_avoidance=SensitiveWordAvoidanceConfigManager.convert(config=config_dict),
+            dataset=DatasetConfigManager.convert(config=config_dict),
+            additional_features=cls.convert_features(config_dict, app_mode),
         )

         app_config.variables, app_config.external_data_variables = BasicVariablesConfigManager.convert(
@@ -113,8 +109,9 @@ class ChatAppConfigManager(BaseAppConfigManager):
         related_config_keys.extend(current_related_config_keys)

         # dataset_query_variable
-        config, current_related_config_keys = DatasetConfigManager.validate_and_set_defaults(tenant_id, app_mode,
-                                                                                             config)
+        config, current_related_config_keys = DatasetConfigManager.validate_and_set_defaults(
+            tenant_id, app_mode, config
+        )
         related_config_keys.extend(current_related_config_keys)

         # opening_statement
@@ -123,7 +120,8 @@ class ChatAppConfigManager(BaseAppConfigManager):

         # suggested_questions_after_answer
         config, current_related_config_keys = SuggestedQuestionsAfterAnswerConfigManager.validate_and_set_defaults(
-            config)
+            config
+        )
         related_config_keys.extend(current_related_config_keys)

         # speech_to_text
@@ -139,8 +137,9 @@ class ChatAppConfigManager(BaseAppConfigManager):
         related_config_keys.extend(current_related_config_keys)

         # moderation validation
-        config, current_related_config_keys = SensitiveWordAvoidanceConfigManager.validate_and_set_defaults(tenant_id,
-                                                                                                            config)
+        config, current_related_config_keys = SensitiveWordAvoidanceConfigManager.validate_and_set_defaults(
+            tenant_id, config
+        )
         related_config_keys.extend(current_related_config_keys)

         related_config_keys = list(set(related_config_keys))

View File

@@ -30,7 +30,8 @@ logger = logging.getLogger(__name__)
 class ChatAppGenerator(MessageBasedAppGenerator):
     @overload
     def generate(
-        self, app_model: App,
+        self,
+        app_model: App,
         user: Union[Account, EndUser],
         args: Any,
         invoke_from: InvokeFrom,
@@ -39,7 +40,8 @@ class ChatAppGenerator(MessageBasedAppGenerator):

     @overload
     def generate(
-        self, app_model: App,
+        self,
+        app_model: App,
         user: Union[Account, EndUser],
         args: Any,
         invoke_from: InvokeFrom,
@@ -47,7 +49,8 @@ class ChatAppGenerator(MessageBasedAppGenerator):
     ) -> dict: ...

     def generate(
-        self, app_model: App,
+        self,
+        app_model: App,
         user: Union[Account, EndUser],
         args: Any,
         invoke_from: InvokeFrom,
@@ -62,58 +65,46 @@ class ChatAppGenerator(MessageBasedAppGenerator):
         :param invoke_from: invoke from source
         :param stream: is stream
         """
-        if not args.get('query'):
-            raise ValueError('query is required')
+        if not args.get("query"):
+            raise ValueError("query is required")

-        query = args['query']
+        query = args["query"]
         if not isinstance(query, str):
-            raise ValueError('query must be a string')
+            raise ValueError("query must be a string")

-        query = query.replace('\x00', '')
-        inputs = args['inputs']
+        query = query.replace("\x00", "")
+        inputs = args["inputs"]

-        extras = {
-            "auto_generate_conversation_name": args.get('auto_generate_name', True)
-        }
+        extras = {"auto_generate_conversation_name": args.get("auto_generate_name", True)}

         # get conversation
         conversation = None
-        if args.get('conversation_id'):
-            conversation = self._get_conversation_by_user(app_model, args.get('conversation_id'), user)
+        if args.get("conversation_id"):
+            conversation = self._get_conversation_by_user(app_model, args.get("conversation_id"), user)

         # get app model config
-        app_model_config = self._get_app_model_config(
-            app_model=app_model,
-            conversation=conversation
-        )
+        app_model_config = self._get_app_model_config(app_model=app_model, conversation=conversation)

         # validate override model config
         override_model_config_dict = None
-        if args.get('model_config'):
+        if args.get("model_config"):
             if invoke_from != InvokeFrom.DEBUGGER:
-                raise ValueError('Only in App debug mode can override model config')
+                raise ValueError("Only in App debug mode can override model config")

             # validate config
             override_model_config_dict = ChatAppConfigManager.config_validate(
-                tenant_id=app_model.tenant_id,
-                config=args.get('model_config')
+                tenant_id=app_model.tenant_id, config=args.get("model_config")
             )

             # always enable retriever resource in debugger mode
-            override_model_config_dict["retriever_resource"] = {
-                "enabled": True
-            }
+            override_model_config_dict["retriever_resource"] = {"enabled": True}

         # parse files
-        files = args['files'] if args.get('files') else []
+        files = args["files"] if args.get("files") else []
         message_file_parser = MessageFileParser(tenant_id=app_model.tenant_id, app_id=app_model.id)
         file_extra_config = FileUploadConfigManager.convert(override_model_config_dict or app_model_config.to_dict())
         if file_extra_config:
-            file_objs = message_file_parser.validate_and_transform_files_arg(
-                files,
-                file_extra_config,
-                user
-            )
+            file_objs = message_file_parser.validate_and_transform_files_arg(files, file_extra_config, user)
         else:
             file_objs = []
@@ -122,7 +113,7 @@ class ChatAppGenerator(MessageBasedAppGenerator):
             app_model=app_model,
             app_model_config=app_model_config,
             conversation=conversation,
-            override_config_dict=override_model_config_dict
+            override_config_dict=override_model_config_dict,
         )

         # get tracing instance
@@ -141,14 +132,11 @@ class ChatAppGenerator(MessageBasedAppGenerator):
             stream=stream,
             invoke_from=invoke_from,
             extras=extras,
-            trace_manager=trace_manager
+            trace_manager=trace_manager,
         )

         # init generate records
-        (
-            conversation,
-            message
-        ) = self._init_generate_records(application_generate_entity, conversation)
+        (conversation, message) = self._init_generate_records(application_generate_entity, conversation)

         # init queue manager
         queue_manager = MessageBasedAppQueueManager(
@@ -157,17 +145,20 @@ class ChatAppGenerator(MessageBasedAppGenerator):
             invoke_from=application_generate_entity.invoke_from,
             conversation_id=conversation.id,
             app_mode=conversation.mode,
-            message_id=message.id
+            message_id=message.id,
         )

         # new thread
-        worker_thread = threading.Thread(target=self._generate_worker, kwargs={
-            'flask_app': current_app._get_current_object(),
-            'application_generate_entity': application_generate_entity,
-            'queue_manager': queue_manager,
-            'conversation_id': conversation.id,
-            'message_id': message.id,
-        })
+        worker_thread = threading.Thread(
+            target=self._generate_worker,
+            kwargs={
+                "flask_app": current_app._get_current_object(),
+                "application_generate_entity": application_generate_entity,
+                "queue_manager": queue_manager,
+                "conversation_id": conversation.id,
+                "message_id": message.id,
+            },
+        )

         worker_thread.start()
@@ -181,16 +172,16 @@ class ChatAppGenerator(MessageBasedAppGenerator):
             stream=stream,
         )

-        return ChatAppGenerateResponseConverter.convert(
-            response=response,
-            invoke_from=invoke_from
-        )
+        return ChatAppGenerateResponseConverter.convert(response=response, invoke_from=invoke_from)

-    def _generate_worker(self, flask_app: Flask,
-                         application_generate_entity: ChatAppGenerateEntity,
-                         queue_manager: AppQueueManager,
-                         conversation_id: str,
-                         message_id: str) -> None:
+    def _generate_worker(
+        self,
+        flask_app: Flask,
+        application_generate_entity: ChatAppGenerateEntity,
+        queue_manager: AppQueueManager,
+        conversation_id: str,
+        message_id: str,
+    ) -> None:
         """
         Generate worker in a new thread.
         :param flask_app: Flask app
@@ -212,20 +203,19 @@ class ChatAppGenerator(MessageBasedAppGenerator):
                 application_generate_entity=application_generate_entity,
                 queue_manager=queue_manager,
                 conversation=conversation,
-                message=message
+                message=message,
             )
         except GenerateTaskStoppedException:
             pass
         except InvokeAuthorizationError:
             queue_manager.publish_error(
-                InvokeAuthorizationError('Incorrect API key provided'),
-                PublishFrom.APPLICATION_MANAGER
+                InvokeAuthorizationError("Incorrect API key provided"), PublishFrom.APPLICATION_MANAGER
             )
         except ValidationError as e:
             logger.exception("Validation Error when generating")
             queue_manager.publish_error(e, PublishFrom.APPLICATION_MANAGER)
         except (ValueError, InvokeError) as e:
-            if os.environ.get("DEBUG") and os.environ.get("DEBUG").lower() == 'true':
+            if os.environ.get("DEBUG") and os.environ.get("DEBUG").lower() == "true":
                 logger.exception("Error when generating")
             queue_manager.publish_error(e, PublishFrom.APPLICATION_MANAGER)
         except Exception as e:

View File

@@ -24,10 +24,13 @@ class ChatAppRunner(AppRunner):
     Chat Application Runner
     """

-    def run(self, application_generate_entity: ChatAppGenerateEntity,
-            queue_manager: AppQueueManager,
-            conversation: Conversation,
-            message: Message) -> None:
+    def run(
+        self,
+        application_generate_entity: ChatAppGenerateEntity,
+        queue_manager: AppQueueManager,
+        conversation: Conversation,
+        message: Message,
+    ) -> None:
         """
         Run application
         :param application_generate_entity: application generate entity
@@ -58,7 +61,7 @@ class ChatAppRunner(AppRunner):
             prompt_template_entity=app_config.prompt_template,
             inputs=inputs,
             files=files,
-            query=query
+            query=query,
         )

         memory = None
@@ -66,13 +69,10 @@ class ChatAppRunner(AppRunner):
             # get memory of conversation (read-only)
             model_instance = ModelInstance(
                 provider_model_bundle=application_generate_entity.model_conf.provider_model_bundle,
-                model=application_generate_entity.model_conf.model
+                model=application_generate_entity.model_conf.model,
             )
-            memory = TokenBufferMemory(
-                conversation=conversation,
-                model_instance=model_instance
-            )
+            memory = TokenBufferMemory(conversation=conversation, model_instance=model_instance)

         # organize all inputs and template to prompt messages
         # Include: prompt template, inputs, query(optional), files(optional)
@@ -84,7 +84,7 @@ class ChatAppRunner(AppRunner):
             inputs=inputs,
             files=files,
             query=query,
-            memory=memory
+            memory=memory,
         )

         # moderation
@@ -96,7 +96,7 @@ class ChatAppRunner(AppRunner):
                 app_generate_entity=application_generate_entity,
                 inputs=inputs,
                 query=query,
-                message_id=message.id
+                message_id=message.id,
             )
         except ModerationException as e:
             self.direct_output(
@@ -104,7 +104,7 @@ class ChatAppRunner(AppRunner):
                 app_generate_entity=application_generate_entity,
                 prompt_messages=prompt_messages,
                 text=str(e),
-                stream=application_generate_entity.stream
+                stream=application_generate_entity.stream,
             )
             return

@@ -115,13 +115,13 @@ class ChatAppRunner(AppRunner):
             message=message,
             query=query,
             user_id=application_generate_entity.user_id,
-            invoke_from=application_generate_entity.invoke_from
+            invoke_from=application_generate_entity.invoke_from,
         )

         if annotation_reply:
             queue_manager.publish(
                 QueueAnnotationReplyEvent(message_annotation_id=annotation_reply.id),
-                PublishFrom.APPLICATION_MANAGER
+                PublishFrom.APPLICATION_MANAGER,
             )

             self.direct_output(
@@ -129,7 +129,7 @@ class ChatAppRunner(AppRunner):
                 app_generate_entity=application_generate_entity,
                 prompt_messages=prompt_messages,
                 text=annotation_reply.content,
-                stream=application_generate_entity.stream
+                stream=application_generate_entity.stream,
             )
             return

@@ -141,7 +141,7 @@ class ChatAppRunner(AppRunner):
                 app_id=app_record.id,
                 external_data_tools=external_data_tools,
                 inputs=inputs,
-                query=query
+                query=query,
             )

         # get context from datasets
@@ -152,7 +152,7 @@ class ChatAppRunner(AppRunner):
                 app_record.id,
                 message.id,
                 application_generate_entity.user_id,
-                application_generate_entity.invoke_from
+                application_generate_entity.invoke_from,
             )

             dataset_retrieval = DatasetRetrieval(application_generate_entity)
@@ -181,29 +181,26 @@ class ChatAppRunner(AppRunner):
             files=files,
             query=query,
             context=context,
-            memory=memory
+            memory=memory,
         )

         # check hosting moderation
         hosting_moderation_result = self.check_hosting_moderation(
             application_generate_entity=application_generate_entity,
             queue_manager=queue_manager,
-            prompt_messages=prompt_messages
+            prompt_messages=prompt_messages,
         )

         if hosting_moderation_result:
             return

         # Re-calculate the max tokens if sum(prompt_token + max_tokens) over model token limit
-        self.recalc_llm_max_tokens(
-            model_config=application_generate_entity.model_conf,
-            prompt_messages=prompt_messages
-        )
+        self.recalc_llm_max_tokens(model_config=application_generate_entity.model_conf, prompt_messages=prompt_messages)

         # Invoke model
         model_instance = ModelInstance(
             provider_model_bundle=application_generate_entity.model_conf.provider_model_bundle,
-            model=application_generate_entity.model_conf.model
+            model=application_generate_entity.model_conf.model,
         )

         db.session.close()
@@ -218,7 +215,5 @@ class ChatAppRunner(AppRunner):

         # handle invoke result
         self._handle_invoke_result(
-            invoke_result=invoke_result,
-            queue_manager=queue_manager,
-            stream=application_generate_entity.stream
+            invoke_result=invoke_result, queue_manager=queue_manager, stream=application_generate_entity.stream
         )

View File

@@ -23,15 +23,15 @@ class ChatAppGenerateResponseConverter(AppGenerateResponseConverter):
        :return:
        """
        response = {
-            'event': 'message',
-            'task_id': blocking_response.task_id,
-            'id': blocking_response.data.id,
-            'message_id': blocking_response.data.message_id,
-            'conversation_id': blocking_response.data.conversation_id,
-            'mode': blocking_response.data.mode,
-            'answer': blocking_response.data.answer,
-            'metadata': blocking_response.data.metadata,
-            'created_at': blocking_response.data.created_at
+            "event": "message",
+            "task_id": blocking_response.task_id,
+            "id": blocking_response.data.id,
+            "message_id": blocking_response.data.message_id,
+            "conversation_id": blocking_response.data.conversation_id,
+            "mode": blocking_response.data.mode,
+            "answer": blocking_response.data.answer,
+            "metadata": blocking_response.data.metadata,
+            "created_at": blocking_response.data.created_at,
        }

        return response
@@ -45,14 +45,15 @@ class ChatAppGenerateResponseConverter(AppGenerateResponseConverter):
        """
        response = cls.convert_blocking_full_response(blocking_response)

-        metadata = response.get('metadata', {})
-        response['metadata'] = cls._get_simple_metadata(metadata)
+        metadata = response.get("metadata", {})
+        response["metadata"] = cls._get_simple_metadata(metadata)

        return response

    @classmethod
-    def convert_stream_full_response(cls, stream_response: Generator[ChatbotAppStreamResponse, None, None]) \
-            -> Generator[str, None, None]:
+    def convert_stream_full_response(
+        cls, stream_response: Generator[ChatbotAppStreamResponse, None, None]
+    ) -> Generator[str, None, None]:
        """
        Convert stream full response.
        :param stream_response: stream response
@@ -63,14 +64,14 @@ class ChatAppGenerateResponseConverter(AppGenerateResponseConverter):
            sub_stream_response = chunk.stream_response

            if isinstance(sub_stream_response, PingStreamResponse):
-                yield 'ping'
+                yield "ping"
                continue

            response_chunk = {
-                'event': sub_stream_response.event.value,
-                'conversation_id': chunk.conversation_id,
-                'message_id': chunk.message_id,
-                'created_at': chunk.created_at
+                "event": sub_stream_response.event.value,
+                "conversation_id": chunk.conversation_id,
+                "message_id": chunk.message_id,
+                "created_at": chunk.created_at,
            }

            if isinstance(sub_stream_response, ErrorStreamResponse):
@@ -81,8 +82,9 @@ class ChatAppGenerateResponseConverter(AppGenerateResponseConverter):
            yield json.dumps(response_chunk)

    @classmethod
-    def convert_stream_simple_response(cls, stream_response: Generator[ChatbotAppStreamResponse, None, None]) \
-            -> Generator[str, None, None]:
+    def convert_stream_simple_response(
+        cls, stream_response: Generator[ChatbotAppStreamResponse, None, None]
+    ) -> Generator[str, None, None]:
        """
        Convert stream simple response.
        :param stream_response: stream response
@@ -93,20 +95,20 @@ class ChatAppGenerateResponseConverter(AppGenerateResponseConverter):
            sub_stream_response = chunk.stream_response

            if isinstance(sub_stream_response, PingStreamResponse):
-                yield 'ping'
+                yield "ping"
                continue

            response_chunk = {
-                'event': sub_stream_response.event.value,
-                'conversation_id': chunk.conversation_id,
-                'message_id': chunk.message_id,
-                'created_at': chunk.created_at
+                "event": sub_stream_response.event.value,
+                "conversation_id": chunk.conversation_id,
+                "message_id": chunk.message_id,
+                "created_at": chunk.created_at,
            }

            if isinstance(sub_stream_response, MessageEndStreamResponse):
                sub_stream_response_dict = sub_stream_response.to_dict()
-                metadata = sub_stream_response_dict.get('metadata', {})
-                sub_stream_response_dict['metadata'] = cls._get_simple_metadata(metadata)
+                metadata = sub_stream_response_dict.get("metadata", {})
+                sub_stream_response_dict["metadata"] = cls._get_simple_metadata(metadata)
                response_chunk.update(sub_stream_response_dict)
            if isinstance(sub_stream_response, ErrorStreamResponse):
                data = cls._error_to_stream_response(sub_stream_response.err)