improve: modernizing validation by migrating pydantic from 1.x to 2.x (#4592)

Author: Bowen Liang
Date: 2024-06-14 01:05:37 +08:00
Committed by: GitHub
Parent: e8afc416dd
Commit: f976740b57
87 changed files with 697 additions and 300 deletions
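
Most of the hunks below are the same mechanical rename: the model_config field on the app generate entities becomes model_conf, and every call site is updated to match. The driver is Pydantic 2.x, which reserves the name model_config on BaseModel for per-model configuration (a ConfigDict), so a field with that exact name can no longer be declared. What follows is a minimal sketch of the conflict and the fix using simplified stand-in classes rather than the real Dify entities; the protected_namespaces=() setting is an assumption about how the remaining warning for model_-prefixed field names is silenced.

# Simplified sketch -- stand-ins for the real Dify entity classes.
from pydantic import BaseModel, ConfigDict

class ModelConfigEntity(BaseModel):
    provider: str
    model: str
    parameters: dict = {}

class AppGenerateEntity(BaseModel):
    # Assumption: relaxing protected_namespaces suppresses Pydantic 2.x's warning
    # about field names starting with "model_".
    model_config = ConfigDict(protected_namespaces=())

    # Pydantic 1.x allowed a field literally named model_config; in 2.x that name
    # is reserved for the ConfigDict above and class creation fails, hence the
    # rename to model_conf seen throughout this commit.
    model_conf: ModelConfigEntity

entity = AppGenerateEntity(model_conf=ModelConfigEntity(provider="openai", model="gpt-4o"))
print(entity.model_conf.model)  # call sites change from .model_config to .model_conf

Behaviour is unchanged apart from the attribute name, which is why so many of the touched files reduce to one-line call-site updates like the ones below.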

View File

@@ -58,7 +58,7 @@ class AgentChatAppRunner(AppRunner):
# Not Include: memory, external data, dataset context
self.get_pre_calculate_rest_tokens(
app_record=app_record,
-model_config=application_generate_entity.model_config,
+model_config=application_generate_entity.model_conf,
prompt_template_entity=app_config.prompt_template,
inputs=inputs,
files=files,
@@ -69,8 +69,8 @@ class AgentChatAppRunner(AppRunner):
if application_generate_entity.conversation_id:
# get memory of conversation (read-only)
model_instance = ModelInstance(
-provider_model_bundle=application_generate_entity.model_config.provider_model_bundle,
-model=application_generate_entity.model_config.model
+provider_model_bundle=application_generate_entity.model_conf.provider_model_bundle,
+model=application_generate_entity.model_conf.model
)
memory = TokenBufferMemory(
@@ -83,7 +83,7 @@ class AgentChatAppRunner(AppRunner):
# memory(optional)
prompt_messages, _ = self.organize_prompt_messages(
app_record=app_record,
-model_config=application_generate_entity.model_config,
+model_config=application_generate_entity.model_conf,
prompt_template_entity=app_config.prompt_template,
inputs=inputs,
files=files,
@@ -152,7 +152,7 @@ class AgentChatAppRunner(AppRunner):
# memory(optional), external data, dataset context(optional)
prompt_messages, _ = self.organize_prompt_messages(
app_record=app_record,
-model_config=application_generate_entity.model_config,
+model_config=application_generate_entity.model_conf,
prompt_template_entity=app_config.prompt_template,
inputs=inputs,
files=files,
@@ -182,12 +182,12 @@ class AgentChatAppRunner(AppRunner):
# init model instance
model_instance = ModelInstance(
-provider_model_bundle=application_generate_entity.model_config.provider_model_bundle,
-model=application_generate_entity.model_config.model
+provider_model_bundle=application_generate_entity.model_conf.provider_model_bundle,
+model=application_generate_entity.model_conf.model
)
prompt_message, _ = self.organize_prompt_messages(
app_record=app_record,
-model_config=application_generate_entity.model_config,
+model_config=application_generate_entity.model_conf,
prompt_template_entity=app_config.prompt_template,
inputs=inputs,
files=files,
@@ -225,7 +225,7 @@ class AgentChatAppRunner(AppRunner):
application_generate_entity=application_generate_entity,
conversation=conversation,
app_config=app_config,
-model_config=application_generate_entity.model_config,
+model_config=application_generate_entity.model_conf,
config=agent_entity,
queue_manager=queue_manager,
message=message,

View File

@@ -100,7 +100,7 @@ class AppQueueManager:
:param pub_from:
:return:
"""
-self._check_for_sqlalchemy_models(event.dict())
+self._check_for_sqlalchemy_models(event.model_dump())
self._publish(event, pub_from)
@abstractmethod
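
The queue manager hunk above shows the other recurring Pydantic 2.x adjustment in this commit: BaseModel.dict() is deprecated in favor of model_dump(). A minimal sketch with a made-up event class (the real queue event classes carry more fields):

from pydantic import BaseModel

class QueueEvent(BaseModel):  # stand-in for the real queue event classes
    task_id: str
    text: str

event = QueueEvent(task_id="t-1", text="hello")

payload = event.dict()        # Pydantic 1.x style; still works in 2.x but emits a DeprecationWarning
payload = event.model_dump()  # Pydantic 2.x replacement, as used above
print(payload)                # {'task_id': 't-1', 'text': 'hello'}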

View File

@@ -218,7 +218,7 @@ class AppRunner:
index = 0
for token in text:
chunk = LLMResultChunk(
-model=app_generate_entity.model_config.model,
+model=app_generate_entity.model_conf.model,
prompt_messages=prompt_messages,
delta=LLMResultChunkDelta(
index=index,
@@ -237,7 +237,7 @@ class AppRunner:
queue_manager.publish(
QueueMessageEndEvent(
llm_result=LLMResult(
-model=app_generate_entity.model_config.model,
+model=app_generate_entity.model_conf.model,
prompt_messages=prompt_messages,
message=AssistantPromptMessage(content=text),
usage=usage if usage else LLMUsage.empty_usage()

View File

@@ -54,7 +54,7 @@ class ChatAppRunner(AppRunner):
# Not Include: memory, external data, dataset context
self.get_pre_calculate_rest_tokens(
app_record=app_record,
-model_config=application_generate_entity.model_config,
+model_config=application_generate_entity.model_conf,
prompt_template_entity=app_config.prompt_template,
inputs=inputs,
files=files,
@@ -65,8 +65,8 @@ class ChatAppRunner(AppRunner):
if application_generate_entity.conversation_id:
# get memory of conversation (read-only)
model_instance = ModelInstance(
-provider_model_bundle=application_generate_entity.model_config.provider_model_bundle,
-model=application_generate_entity.model_config.model
+provider_model_bundle=application_generate_entity.model_conf.provider_model_bundle,
+model=application_generate_entity.model_conf.model
)
memory = TokenBufferMemory(
@@ -79,7 +79,7 @@ class ChatAppRunner(AppRunner):
# memory(optional)
prompt_messages, stop = self.organize_prompt_messages(
app_record=app_record,
-model_config=application_generate_entity.model_config,
+model_config=application_generate_entity.model_conf,
prompt_template_entity=app_config.prompt_template,
inputs=inputs,
files=files,
@@ -159,7 +159,7 @@ class ChatAppRunner(AppRunner):
app_id=app_record.id,
user_id=application_generate_entity.user_id,
tenant_id=app_record.tenant_id,
-model_config=application_generate_entity.model_config,
+model_config=application_generate_entity.model_conf,
config=app_config.dataset,
query=query,
invoke_from=application_generate_entity.invoke_from,
@@ -173,7 +173,7 @@ class ChatAppRunner(AppRunner):
# memory(optional), external data, dataset context(optional)
prompt_messages, stop = self.organize_prompt_messages(
app_record=app_record,
-model_config=application_generate_entity.model_config,
+model_config=application_generate_entity.model_conf,
prompt_template_entity=app_config.prompt_template,
inputs=inputs,
files=files,
@@ -194,21 +194,21 @@ class ChatAppRunner(AppRunner):
# Re-calculate the max tokens if sum(prompt_token + max_tokens) over model token limit
self.recalc_llm_max_tokens(
-model_config=application_generate_entity.model_config,
+model_config=application_generate_entity.model_conf,
prompt_messages=prompt_messages
)
# Invoke model
model_instance = ModelInstance(
-provider_model_bundle=application_generate_entity.model_config.provider_model_bundle,
-model=application_generate_entity.model_config.model
+provider_model_bundle=application_generate_entity.model_conf.provider_model_bundle,
+model=application_generate_entity.model_conf.model
)
db.session.close()
invoke_result = model_instance.invoke_llm(
prompt_messages=prompt_messages,
-model_parameters=application_generate_entity.model_config.parameters,
+model_parameters=application_generate_entity.model_conf.parameters,
stop=stop,
stream=application_generate_entity.stream,
user=application_generate_entity.user_id,

View File

@@ -50,7 +50,7 @@ class CompletionAppRunner(AppRunner):
# Not Include: memory, external data, dataset context
self.get_pre_calculate_rest_tokens(
app_record=app_record,
-model_config=application_generate_entity.model_config,
+model_config=application_generate_entity.model_conf,
prompt_template_entity=app_config.prompt_template,
inputs=inputs,
files=files,
@@ -61,7 +61,7 @@ class CompletionAppRunner(AppRunner):
# Include: prompt template, inputs, query(optional), files(optional)
prompt_messages, stop = self.organize_prompt_messages(
app_record=app_record,
-model_config=application_generate_entity.model_config,
+model_config=application_generate_entity.model_conf,
prompt_template_entity=app_config.prompt_template,
inputs=inputs,
files=files,
@@ -119,7 +119,7 @@ class CompletionAppRunner(AppRunner):
app_id=app_record.id,
user_id=application_generate_entity.user_id,
tenant_id=app_record.tenant_id,
-model_config=application_generate_entity.model_config,
+model_config=application_generate_entity.model_conf,
config=dataset_config,
query=query,
invoke_from=application_generate_entity.invoke_from,
@@ -132,7 +132,7 @@ class CompletionAppRunner(AppRunner):
# memory(optional), external data, dataset context(optional)
prompt_messages, stop = self.organize_prompt_messages(
app_record=app_record,
-model_config=application_generate_entity.model_config,
+model_config=application_generate_entity.model_conf,
prompt_template_entity=app_config.prompt_template,
inputs=inputs,
files=files,
@@ -152,21 +152,21 @@ class CompletionAppRunner(AppRunner):
# Re-calculate the max tokens if sum(prompt_token + max_tokens) over model token limit
self.recalc_llm_max_tokens(
-model_config=application_generate_entity.model_config,
+model_config=application_generate_entity.model_conf,
prompt_messages=prompt_messages
)
# Invoke model
model_instance = ModelInstance(
-provider_model_bundle=application_generate_entity.model_config.provider_model_bundle,
-model=application_generate_entity.model_config.model
+provider_model_bundle=application_generate_entity.model_conf.provider_model_bundle,
+model=application_generate_entity.model_conf.model
)
db.session.close()
invoke_result = model_instance.invoke_llm(
prompt_messages=prompt_messages,
-model_parameters=application_generate_entity.model_config.parameters,
+model_parameters=application_generate_entity.model_conf.parameters,
stop=stop,
stream=application_generate_entity.stream,
user=application_generate_entity.user_id,

View File

@@ -158,8 +158,8 @@ class MessageBasedAppGenerator(BaseAppGenerator):
model_id = None
else:
app_model_config_id = app_config.app_model_config_id
-model_provider = application_generate_entity.model_config.provider
-model_id = application_generate_entity.model_config.model
+model_provider = application_generate_entity.model_conf.provider
+model_id = application_generate_entity.model_conf.model
override_model_configs = None
if app_config.app_model_config_from == EasyUIBasedAppModelConfigFrom.ARGS \
and app_config.app_mode in [AppMode.AGENT_CHAT, AppMode.CHAT, AppMode.COMPLETION]:
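
After a field rename like this, any caller still reading the old attribute fails only at runtime, so it can be worth scanning the tree for leftover references once the mechanical edits are done. A rough sketch, not part of this commit; the path and pattern are illustrative, and matches can include legitimate reads of Pydantic's own model_config attribute, so hits need manual review.

import pathlib
import re

# Attribute accesses of the old field name; class-level "model_config = ConfigDict(...)"
# declarations are not matched because they lack the leading dot.
OLD_FIELD = re.compile(r"\.model_config\b")

for path in pathlib.Path("api/core").rglob("*.py"):
    for lineno, line in enumerate(path.read_text(encoding="utf-8").splitlines(), start=1):
        if OLD_FIELD.search(line):
            print(f"{path}:{lineno}: {line.strip()}")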