feat: backend model load balancing support (#4927)

takatost committed on 2024-06-05 00:13:04 +08:00 (committed by GitHub)
parent 52ec152dd3 · commit d1dbbc1e33
47 changed files with 2191 additions and 256 deletions

View File

@@ -1,6 +1,6 @@
import time
from collections.abc import Generator
from typing import Optional, Union, cast
from typing import Optional, Union
from core.app.app_config.entities import ExternalDataVariableEntity, PromptTemplateEntity
from core.app.apps.base_app_queue_manager import AppQueueManager, PublishFrom
@@ -16,11 +16,11 @@ from core.app.features.hosting_moderation.hosting_moderation import HostingModer
from core.external_data_tool.external_data_fetch import ExternalDataFetch
from core.file.file_obj import FileVar
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_manager import ModelInstance
from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage
from core.model_runtime.entities.message_entities import AssistantPromptMessage, PromptMessage
from core.model_runtime.entities.model_entities import ModelPropertyKey
from core.model_runtime.errors.invoke import InvokeBadRequestError
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
from core.moderation.input_moderation import InputModeration
from core.prompt.advanced_prompt_transform import AdvancedPromptTransform
from core.prompt.entities.advanced_prompt_entities import ChatModelMessage, CompletionModelPromptTemplate, MemoryConfig
@@ -45,8 +45,11 @@ class AppRunner:
:param query: query
:return:
"""
model_type_instance = model_config.provider_model_bundle.model_type_instance
model_type_instance = cast(LargeLanguageModel, model_type_instance)
# Invoke model
model_instance = ModelInstance(
provider_model_bundle=model_config.provider_model_bundle,
model=model_config.model
)
model_context_tokens = model_config.model_schema.model_properties.get(ModelPropertyKey.CONTEXT_SIZE)
@@ -73,9 +76,7 @@ class AppRunner:
query=query
)
prompt_tokens = model_type_instance.get_num_tokens(
model_config.model,
model_config.credentials,
prompt_tokens = model_instance.get_llm_num_tokens(
prompt_messages
)
@@ -89,8 +90,10 @@ class AppRunner:
def recalc_llm_max_tokens(self, model_config: ModelConfigWithCredentialsEntity,
prompt_messages: list[PromptMessage]):
# recalculate max_tokens if prompt_tokens + max_tokens exceeds the model's token limit
model_type_instance = model_config.provider_model_bundle.model_type_instance
model_type_instance = cast(LargeLanguageModel, model_type_instance)
model_instance = ModelInstance(
provider_model_bundle=model_config.provider_model_bundle,
model=model_config.model
)
model_context_tokens = model_config.model_schema.model_properties.get(ModelPropertyKey.CONTEXT_SIZE)
@@ -107,9 +110,7 @@ class AppRunner:
if max_tokens is None:
max_tokens = 0
prompt_tokens = model_type_instance.get_num_tokens(
model_config.model,
model_config.credentials,
prompt_tokens = model_instance.get_llm_num_tokens(
prompt_messages
)
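The hunks above swap direct LargeLanguageModel.get_num_tokens(model, credentials, ...) calls for ModelInstance.get_llm_num_tokens(...), so token counting flows through the same credential selection (including load balancing) as real invocations. A minimal sketch of the migrated call site, using only names that appear in this diff:

    from core.model_manager import ModelInstance

    def count_prompt_tokens(model_config, prompt_messages) -> int:
        # Build a ModelInstance instead of unwrapping the bundle's
        # model_type_instance; credentials are resolved inside it.
        model_instance = ModelInstance(
            provider_model_bundle=model_config.provider_model_bundle,
            model=model_config.model,
        )
        # No explicit model/credentials arguments anymore.
        return model_instance.get_llm_num_tokens(prompt_messages)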

View File

@@ -37,6 +37,7 @@ from core.app.entities.task_entities import (
)
from core.app.task_pipeline.based_generate_task_pipeline import BasedGenerateTaskPipeline
from core.app.task_pipeline.message_cycle_manage import MessageCycleManage
from core.model_manager import ModelInstance
from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage
from core.model_runtime.entities.message_entities import (
AssistantPromptMessage,
@@ -317,29 +318,30 @@ class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline, MessageCycleMan
"""
model_config = self._model_config
model = model_config.model
model_type_instance = model_config.provider_model_bundle.model_type_instance
model_type_instance = cast(LargeLanguageModel, model_type_instance)
model_instance = ModelInstance(
provider_model_bundle=model_config.provider_model_bundle,
model=model_config.model
)
# calculate num tokens
prompt_tokens = 0
if event.stopped_by != QueueStopEvent.StopBy.ANNOTATION_REPLY:
prompt_tokens = model_type_instance.get_num_tokens(
model,
model_config.credentials,
prompt_tokens = model_instance.get_llm_num_tokens(
self._task_state.llm_result.prompt_messages
)
completion_tokens = 0
if event.stopped_by == QueueStopEvent.StopBy.USER_MANUAL:
completion_tokens = model_type_instance.get_num_tokens(
model,
model_config.credentials,
completion_tokens = model_instance.get_llm_num_tokens(
[self._task_state.llm_result.message]
)
credentials = model_config.credentials
# transform usage
model_type_instance = model_config.provider_model_bundle.model_type_instance
model_type_instance = cast(LargeLanguageModel, model_type_instance)
self._task_state.llm_result.usage = model_type_instance._calc_response_usage(
model,
credentials,

View File

@@ -16,6 +16,7 @@ class ModelStatus(Enum):
NO_CONFIGURE = "no-configure"
QUOTA_EXCEEDED = "quota-exceeded"
NO_PERMISSION = "no-permission"
DISABLED = "disabled"
class SimpleModelProviderEntity(BaseModel):
@@ -43,12 +44,19 @@ class SimpleModelProviderEntity(BaseModel):
)
class ModelWithProviderEntity(ProviderModel):
class ProviderModelWithStatusEntity(ProviderModel):
"""
Model class for model response.
"""
status: ModelStatus
load_balancing_enabled: bool = False
class ModelWithProviderEntity(ProviderModelWithStatusEntity):
"""
Model with provider entity.
"""
provider: SimpleModelProviderEntity
status: ModelStatus
class DefaultModelProviderEntity(BaseModel):
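The refactor lifts status (plus the new load_balancing_enabled flag) into a shared ProviderModelWithStatusEntity base, so plain and provider-attached model listings report status uniformly. A self-contained pydantic sketch of the resulting hierarchy; ProviderModel and SimpleModelProviderEntity are reduced to stand-ins here:

    from enum import Enum
    from pydantic import BaseModel

    class ModelStatus(Enum):
        ACTIVE = "active"
        NO_CONFIGURE = "no-configure"
        QUOTA_EXCEEDED = "quota-exceeded"
        NO_PERMISSION = "no-permission"
        DISABLED = "disabled"  # added by this commit

    class ProviderModel(BaseModel):  # stand-in for the real entity
        model: str

    class SimpleModelProviderEntity(BaseModel):  # stand-in
        provider: str

    class ProviderModelWithStatusEntity(ProviderModel):
        """Model plus status; shared base introduced in this commit."""
        status: ModelStatus
        load_balancing_enabled: bool = False

    class ModelWithProviderEntity(ProviderModelWithStatusEntity):
        """Adds the owning provider on top of the status-carrying base."""
        provider: SimpleModelProviderEntity

    m = ModelWithProviderEntity(
        model="gpt-4",
        status=ModelStatus.DISABLED,
        provider=SimpleModelProviderEntity(provider="openai"),
    )
    print(m.status.value, m.load_balancing_enabled)  # disabled False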

View File

@@ -1,6 +1,7 @@
import datetime
import json
import logging
from collections import defaultdict
from collections.abc import Iterator
from json import JSONDecodeError
from typing import Optional
@@ -8,7 +9,12 @@ from typing import Optional
from pydantic import BaseModel
from core.entities.model_entities import ModelStatus, ModelWithProviderEntity, SimpleModelProviderEntity
from core.entities.provider_entities import CustomConfiguration, SystemConfiguration, SystemConfigurationStatus
from core.entities.provider_entities import (
CustomConfiguration,
ModelSettings,
SystemConfiguration,
SystemConfigurationStatus,
)
from core.helper import encrypter
from core.helper.model_provider_cache import ProviderCredentialsCache, ProviderCredentialsCacheType
from core.model_runtime.entities.model_entities import FetchFrom, ModelType
@@ -22,7 +28,14 @@ from core.model_runtime.model_providers import model_provider_factory
from core.model_runtime.model_providers.__base.ai_model import AIModel
from core.model_runtime.model_providers.__base.model_provider import ModelProvider
from extensions.ext_database import db
from models.provider import Provider, ProviderModel, ProviderType, TenantPreferredModelProvider
from models.provider import (
LoadBalancingModelConfig,
Provider,
ProviderModel,
ProviderModelSetting,
ProviderType,
TenantPreferredModelProvider,
)
logger = logging.getLogger(__name__)
@@ -39,6 +52,7 @@ class ProviderConfiguration(BaseModel):
using_provider_type: ProviderType
system_configuration: SystemConfiguration
custom_configuration: CustomConfiguration
model_settings: list[ModelSettings]
def __init__(self, **data):
super().__init__(**data)
@@ -62,6 +76,14 @@ class ProviderConfiguration(BaseModel):
:param model: model name
:return:
"""
if self.model_settings:
# check if model is disabled by admin
for model_setting in self.model_settings:
if (model_setting.model_type == model_type
and model_setting.model == model):
if not model_setting.enabled:
raise ValueError(f'Model {model} is disabled.')
if self.using_provider_type == ProviderType.SYSTEM:
restrict_models = []
for quota_configuration in self.system_configuration.quota_configurations:
@@ -80,15 +102,17 @@ class ProviderConfiguration(BaseModel):
return copy_credentials
else:
credentials = None
if self.custom_configuration.models:
for model_configuration in self.custom_configuration.models:
if model_configuration.model_type == model_type and model_configuration.model == model:
return model_configuration.credentials
credentials = model_configuration.credentials
break
if self.custom_configuration.provider:
return self.custom_configuration.provider.credentials
else:
return None
credentials = self.custom_configuration.provider.credentials
return credentials
def get_system_configuration_status(self) -> SystemConfigurationStatus:
"""
@@ -130,7 +154,7 @@ class ProviderConfiguration(BaseModel):
return credentials
# Obfuscate credentials
return self._obfuscated_credentials(
return self.obfuscated_credentials(
credentials=credentials,
credential_form_schemas=self.provider.provider_credential_schema.credential_form_schemas
if self.provider.provider_credential_schema else []
@@ -151,7 +175,7 @@ class ProviderConfiguration(BaseModel):
).first()
# Get provider credential secret variables
provider_credential_secret_variables = self._extract_secret_variables(
provider_credential_secret_variables = self.extract_secret_variables(
self.provider.provider_credential_schema.credential_form_schemas
if self.provider.provider_credential_schema else []
)
@@ -274,7 +298,7 @@ class ProviderConfiguration(BaseModel):
return credentials
# Obfuscate credentials
return self._obfuscated_credentials(
return self.obfuscated_credentials(
credentials=credentials,
credential_form_schemas=self.provider.model_credential_schema.credential_form_schemas
if self.provider.model_credential_schema else []
@@ -302,7 +326,7 @@ class ProviderConfiguration(BaseModel):
).first()
# Get provider credential secret variables
provider_credential_secret_variables = self._extract_secret_variables(
provider_credential_secret_variables = self.extract_secret_variables(
self.provider.model_credential_schema.credential_form_schemas
if self.provider.model_credential_schema else []
)
@@ -402,6 +426,160 @@ class ProviderConfiguration(BaseModel):
provider_model_credentials_cache.delete()
def enable_model(self, model_type: ModelType, model: str) -> ProviderModelSetting:
"""
Enable model.
:param model_type: model type
:param model: model name
:return:
"""
model_setting = db.session.query(ProviderModelSetting) \
.filter(
ProviderModelSetting.tenant_id == self.tenant_id,
ProviderModelSetting.provider_name == self.provider.provider,
ProviderModelSetting.model_type == model_type.to_origin_model_type(),
ProviderModelSetting.model_name == model
).first()
if model_setting:
model_setting.enabled = True
model_setting.updated_at = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None)
db.session.commit()
else:
model_setting = ProviderModelSetting(
tenant_id=self.tenant_id,
provider_name=self.provider.provider,
model_type=model_type.to_origin_model_type(),
model_name=model,
enabled=True
)
db.session.add(model_setting)
db.session.commit()
return model_setting
def disable_model(self, model_type: ModelType, model: str) -> ProviderModelSetting:
"""
Disable model.
:param model_type: model type
:param model: model name
:return:
"""
model_setting = db.session.query(ProviderModelSetting) \
.filter(
ProviderModelSetting.tenant_id == self.tenant_id,
ProviderModelSetting.provider_name == self.provider.provider,
ProviderModelSetting.model_type == model_type.to_origin_model_type(),
ProviderModelSetting.model_name == model
).first()
if model_setting:
model_setting.enabled = False
model_setting.updated_at = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None)
db.session.commit()
else:
model_setting = ProviderModelSetting(
tenant_id=self.tenant_id,
provider_name=self.provider.provider,
model_type=model_type.to_origin_model_type(),
model_name=model,
enabled=False
)
db.session.add(model_setting)
db.session.commit()
return model_setting
def get_provider_model_setting(self, model_type: ModelType, model: str) -> Optional[ProviderModelSetting]:
"""
Get provider model setting.
:param model_type: model type
:param model: model name
:return:
"""
return db.session.query(ProviderModelSetting) \
.filter(
ProviderModelSetting.tenant_id == self.tenant_id,
ProviderModelSetting.provider_name == self.provider.provider,
ProviderModelSetting.model_type == model_type.to_origin_model_type(),
ProviderModelSetting.model_name == model
).first()
def enable_model_load_balancing(self, model_type: ModelType, model: str) -> ProviderModelSetting:
"""
Enable model load balancing.
:param model_type: model type
:param model: model name
:return:
"""
load_balancing_config_count = db.session.query(LoadBalancingModelConfig) \
.filter(
LoadBalancingModelConfig.tenant_id == self.tenant_id,
LoadBalancingModelConfig.provider_name == self.provider.provider,
LoadBalancingModelConfig.model_type == model_type.to_origin_model_type(),
LoadBalancingModelConfig.model_name == model
).count()
if load_balancing_config_count <= 1:
raise ValueError('Model load balancing requires more than one configuration.')
model_setting = db.session.query(ProviderModelSetting) \
.filter(
ProviderModelSetting.tenant_id == self.tenant_id,
ProviderModelSetting.provider_name == self.provider.provider,
ProviderModelSetting.model_type == model_type.to_origin_model_type(),
ProviderModelSetting.model_name == model
).first()
if model_setting:
model_setting.load_balancing_enabled = True
model_setting.updated_at = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None)
db.session.commit()
else:
model_setting = ProviderModelSetting(
tenant_id=self.tenant_id,
provider_name=self.provider.provider,
model_type=model_type.to_origin_model_type(),
model_name=model,
load_balancing_enabled=True
)
db.session.add(model_setting)
db.session.commit()
return model_setting
def disable_model_load_balancing(self, model_type: ModelType, model: str) -> ProviderModelSetting:
"""
Disable model load balancing.
:param model_type: model type
:param model: model name
:return:
"""
model_setting = db.session.query(ProviderModelSetting) \
.filter(
ProviderModelSetting.tenant_id == self.tenant_id,
ProviderModelSetting.provider_name == self.provider.provider,
ProviderModelSetting.model_type == model_type.to_origin_model_type(),
ProviderModelSetting.model_name == model
).first()
if model_setting:
model_setting.load_balancing_enabled = False
model_setting.updated_at = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None)
db.session.commit()
else:
model_setting = ProviderModelSetting(
tenant_id=self.tenant_id,
provider_name=self.provider.provider,
model_type=model_type.to_origin_model_type(),
model_name=model,
load_balancing_enabled=False
)
db.session.add(model_setting)
db.session.commit()
return model_setting
def get_provider_instance(self) -> ModelProvider:
"""
Get provider instance.
@@ -453,7 +631,7 @@ class ProviderConfiguration(BaseModel):
db.session.commit()
def _extract_secret_variables(self, credential_form_schemas: list[CredentialFormSchema]) -> list[str]:
def extract_secret_variables(self, credential_form_schemas: list[CredentialFormSchema]) -> list[str]:
"""
Extract secret input form variables.
@@ -467,7 +645,7 @@ class ProviderConfiguration(BaseModel):
return secret_input_form_variables
def _obfuscated_credentials(self, credentials: dict, credential_form_schemas: list[CredentialFormSchema]) -> dict:
def obfuscated_credentials(self, credentials: dict, credential_form_schemas: list[CredentialFormSchema]) -> dict:
"""
Obfuscated credentials.
@@ -476,7 +654,7 @@ class ProviderConfiguration(BaseModel):
:return:
"""
# Get provider credential secret variables
credential_secret_variables = self._extract_secret_variables(
credential_secret_variables = self.extract_secret_variables(
credential_form_schemas
)
@@ -522,15 +700,22 @@ class ProviderConfiguration(BaseModel):
else:
model_types = provider_instance.get_provider_schema().supported_model_types
# Group model settings by model type and model
model_setting_map = defaultdict(dict)
for model_setting in self.model_settings:
model_setting_map[model_setting.model_type][model_setting.model] = model_setting
if self.using_provider_type == ProviderType.SYSTEM:
provider_models = self._get_system_provider_models(
model_types=model_types,
provider_instance=provider_instance
provider_instance=provider_instance,
model_setting_map=model_setting_map
)
else:
provider_models = self._get_custom_provider_models(
model_types=model_types,
provider_instance=provider_instance
provider_instance=provider_instance,
model_setting_map=model_setting_map
)
if only_active:
@@ -541,18 +726,27 @@ class ProviderConfiguration(BaseModel):
def _get_system_provider_models(self,
model_types: list[ModelType],
provider_instance: ModelProvider) -> list[ModelWithProviderEntity]:
provider_instance: ModelProvider,
model_setting_map: dict[ModelType, dict[str, ModelSettings]]) \
-> list[ModelWithProviderEntity]:
"""
Get system provider models.
:param model_types: model types
:param provider_instance: provider instance
:param model_setting_map: model setting map
:return:
"""
provider_models = []
for model_type in model_types:
provider_models.extend(
[
for m in provider_instance.models(model_type):
status = ModelStatus.ACTIVE
if m.model_type in model_setting_map and m.model in model_setting_map[m.model_type]:
model_setting = model_setting_map[m.model_type][m.model]
if model_setting.enabled is False:
status = ModelStatus.DISABLED
provider_models.append(
ModelWithProviderEntity(
model=m.model,
label=m.label,
@@ -562,11 +756,9 @@ class ProviderConfiguration(BaseModel):
model_properties=m.model_properties,
deprecated=m.deprecated,
provider=SimpleModelProviderEntity(self.provider),
status=ModelStatus.ACTIVE
status=status
)
for m in provider_instance.models(model_type)
]
)
)
if self.provider.provider not in original_provider_configurate_methods:
original_provider_configurate_methods[self.provider.provider] = []
@@ -586,7 +778,8 @@ class ProviderConfiguration(BaseModel):
break
if should_use_custom_model:
if original_provider_configurate_methods[self.provider.provider] == [ConfigurateMethod.CUSTOMIZABLE_MODEL]:
if original_provider_configurate_methods[self.provider.provider] == [
ConfigurateMethod.CUSTOMIZABLE_MODEL]:
# only customizable model
for restrict_model in restrict_models:
copy_credentials = self.system_configuration.credentials.copy()
@@ -611,6 +804,13 @@ class ProviderConfiguration(BaseModel):
if custom_model_schema.model_type not in model_types:
continue
status = ModelStatus.ACTIVE
if (custom_model_schema.model_type in model_setting_map
and custom_model_schema.model in model_setting_map[custom_model_schema.model_type]):
model_setting = model_setting_map[custom_model_schema.model_type][custom_model_schema.model]
if model_setting.enabled is False:
status = ModelStatus.DISABLED
provider_models.append(
ModelWithProviderEntity(
model=custom_model_schema.model,
@@ -621,7 +821,7 @@ class ProviderConfiguration(BaseModel):
model_properties=custom_model_schema.model_properties,
deprecated=custom_model_schema.deprecated,
provider=SimpleModelProviderEntity(self.provider),
status=ModelStatus.ACTIVE
status=status
)
)
@@ -632,16 +832,20 @@ class ProviderConfiguration(BaseModel):
m.status = ModelStatus.NO_PERMISSION
elif not quota_configuration.is_valid:
m.status = ModelStatus.QUOTA_EXCEEDED
return provider_models
def _get_custom_provider_models(self,
model_types: list[ModelType],
provider_instance: ModelProvider) -> list[ModelWithProviderEntity]:
provider_instance: ModelProvider,
model_setting_map: dict[ModelType, dict[str, ModelSettings]]) \
-> list[ModelWithProviderEntity]:
"""
Get custom provider models.
:param model_types: model types
:param provider_instance: provider instance
:param model_setting_map: model setting map
:return:
"""
provider_models = []
@@ -656,6 +860,16 @@ class ProviderConfiguration(BaseModel):
models = provider_instance.models(model_type)
for m in models:
status = ModelStatus.ACTIVE if credentials else ModelStatus.NO_CONFIGURE
load_balancing_enabled = False
if m.model_type in model_setting_map and m.model in model_setting_map[m.model_type]:
model_setting = model_setting_map[m.model_type][m.model]
if model_setting.enabled is False:
status = ModelStatus.DISABLED
if len(model_setting.load_balancing_configs) > 1:
load_balancing_enabled = True
provider_models.append(
ModelWithProviderEntity(
model=m.model,
@@ -666,7 +880,8 @@ class ProviderConfiguration(BaseModel):
model_properties=m.model_properties,
deprecated=m.deprecated,
provider=SimpleModelProviderEntity(self.provider),
status=ModelStatus.ACTIVE if credentials else ModelStatus.NO_CONFIGURE
status=status,
load_balancing_enabled=load_balancing_enabled
)
)
@@ -690,6 +905,17 @@ class ProviderConfiguration(BaseModel):
if not custom_model_schema:
continue
status = ModelStatus.ACTIVE
load_balancing_enabled = False
if (custom_model_schema.model_type in model_setting_map
and custom_model_schema.model in model_setting_map[custom_model_schema.model_type]):
model_setting = model_setting_map[custom_model_schema.model_type][custom_model_schema.model]
if model_setting.enabled is False:
status = ModelStatus.DISABLED
if len(model_setting.load_balancing_configs) > 1:
load_balancing_enabled = True
provider_models.append(
ModelWithProviderEntity(
model=custom_model_schema.model,
@@ -700,7 +926,8 @@ class ProviderConfiguration(BaseModel):
model_properties=custom_model_schema.model_properties,
deprecated=custom_model_schema.deprecated,
provider=SimpleModelProviderEntity(self.provider),
status=ModelStatus.ACTIVE
status=status,
load_balancing_enabled=load_balancing_enabled
)
)
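All four new settings methods share one upsert pattern: fetch the tenant's ProviderModelSetting row, flip the flag if it exists, otherwise insert a fresh row. A hedged usage sketch, assuming provider_configuration was obtained from Dify's ProviderManager:

    from core.model_runtime.entities.model_entities import ModelType

    # Enabling load balancing fails unless more than one
    # LoadBalancingModelConfig row exists for this model.
    try:
        setting = provider_configuration.enable_model_load_balancing(
            model_type=ModelType.LLM,
            model="gpt-4",
        )
    except ValueError as e:
        print(e)  # raised when 0 or 1 configs are present

    # Disabling a model marks it DISABLED in listings and makes
    # get_current_credentials() raise ValueError for it.
    provider_configuration.disable_model(model_type=ModelType.LLM, model="gpt-4")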

View File

@@ -72,3 +72,22 @@ class CustomConfiguration(BaseModel):
"""
provider: Optional[CustomProviderConfiguration] = None
models: list[CustomModelConfiguration] = []
class ModelLoadBalancingConfiguration(BaseModel):
"""
Class for model load balancing configuration.
"""
id: str
name: str
credentials: dict
class ModelSettings(BaseModel):
"""
Model class for model settings.
"""
model: str
model_type: ModelType
enabled: bool = True
load_balancing_configs: list[ModelLoadBalancingConfiguration] = []
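ModelSettings is the in-memory shape the provider manager hands to each configuration: the enabled flag plus zero or more decrypted credential sets. A self-contained sketch mirroring the entities above, with ModelType reduced to a stand-in enum:

    from enum import Enum
    from pydantic import BaseModel

    class ModelType(Enum):  # stand-in for the runtime's enum
        LLM = "llm"

    class ModelLoadBalancingConfiguration(BaseModel):
        id: str
        name: str
        credentials: dict

    class ModelSettings(BaseModel):
        model: str
        model_type: ModelType
        enabled: bool = True
        load_balancing_configs: list[ModelLoadBalancingConfiguration] = []

    # Two credential sets make load balancing meaningful; the provider
    # manager keeps the list only when more than one entry survives.
    settings = ModelSettings(
        model="gpt-4",
        model_type=ModelType.LLM,
        load_balancing_configs=[
            ModelLoadBalancingConfiguration(id="1", name="key-a", credentials={"api_key": "..."}),
            ModelLoadBalancingConfiguration(id="2", name="__inherit__", credentials={}),
        ],
    )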

View File

@@ -7,7 +7,7 @@ from typing import Any, Optional
from pydantic import BaseModel
from core.utils.position_helper import sort_to_dict_by_position_map
from core.helper.position_helper import sort_to_dict_by_position_map
class ExtensionModule(enum.Enum):

View File

@@ -9,6 +9,7 @@ from extensions.ext_redis import redis_client
class ProviderCredentialsCacheType(Enum):
PROVIDER = "provider"
MODEL = "provider_model"
LOAD_BALANCING_MODEL = "load_balancing_provider_model"
class ProviderCredentialsCache:

View File

@@ -286,11 +286,7 @@ class IndexingRunner:
if len(preview_texts) < 5:
preview_texts.append(document.page_content)
if indexing_technique == 'high_quality' or embedding_model_instance:
embedding_model_type_instance = embedding_model_instance.model_type_instance
embedding_model_type_instance = cast(TextEmbeddingModel, embedding_model_type_instance)
tokens += embedding_model_type_instance.get_num_tokens(
model=embedding_model_instance.model,
credentials=embedding_model_instance.credentials,
tokens += embedding_model_instance.get_text_embedding_num_tokens(
texts=[self.filter_string(document.page_content)]
)
@@ -658,10 +654,6 @@ class IndexingRunner:
tokens = 0
chunk_size = 10
embedding_model_type_instance = None
if embedding_model_instance:
embedding_model_type_instance = embedding_model_instance.model_type_instance
embedding_model_type_instance = cast(TextEmbeddingModel, embedding_model_type_instance)
# create keyword index
create_keyword_thread = threading.Thread(target=self._process_keyword_index,
args=(current_app._get_current_object(),
@@ -674,8 +666,7 @@ class IndexingRunner:
chunk_documents = documents[i:i + chunk_size]
futures.append(executor.submit(self._process_chunk, current_app._get_current_object(), index_processor,
chunk_documents, dataset,
dataset_document, embedding_model_instance,
embedding_model_type_instance))
dataset_document, embedding_model_instance))
for future in futures:
tokens += future.result()
@@ -716,7 +707,7 @@ class IndexingRunner:
db.session.commit()
def _process_chunk(self, flask_app, index_processor, chunk_documents, dataset, dataset_document,
embedding_model_instance, embedding_model_type_instance):
embedding_model_instance):
with flask_app.app_context():
# check document is paused
self._check_document_paused_status(dataset_document.id)
@@ -724,9 +715,7 @@ class IndexingRunner:
tokens = 0
if dataset.indexing_technique == 'high_quality' or embedding_model_instance:
tokens += sum(
embedding_model_type_instance.get_num_tokens(
embedding_model_instance.model,
embedding_model_instance.credentials,
embedding_model_instance.get_text_embedding_num_tokens(
[document.page_content]
)
for document in chunk_documents
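As in the LLM paths, embedding token counting now goes through the ModelInstance wrapper instead of unwrapping model_type_instance and passing model and credentials by hand. A condensed sketch of the per-chunk accounting, with names from the diff:

    def count_chunk_tokens(embedding_model_instance, chunk_documents) -> int:
        # One call per document, matching the original loop; credentials
        # (including load-balanced ones) are resolved inside the instance.
        return sum(
            embedding_model_instance.get_text_embedding_num_tokens(
                texts=[document.page_content]
            )
            for document in chunk_documents
        )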

View File

@@ -9,8 +9,6 @@ from core.model_runtime.entities.message_entities import (
TextPromptMessageContent,
UserPromptMessage,
)
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.model_providers import model_provider_factory
from extensions.ext_database import db
from models.model import AppMode, Conversation, Message
@@ -78,12 +76,7 @@ class TokenBufferMemory:
return []
# prune the chat message if it exceeds the max token limit
provider_instance = model_provider_factory.get_provider_instance(self.model_instance.provider)
model_type_instance = provider_instance.get_model_instance(ModelType.LLM)
curr_message_tokens = model_type_instance.get_num_tokens(
self.model_instance.model,
self.model_instance.credentials,
curr_message_tokens = self.model_instance.get_llm_num_tokens(
prompt_messages
)
@@ -91,9 +84,7 @@ class TokenBufferMemory:
pruned_memory = []
while curr_message_tokens > max_token_limit and prompt_messages:
pruned_memory.append(prompt_messages.pop(0))
curr_message_tokens = model_type_instance.get_num_tokens(
self.model_instance.model,
self.model_instance.credentials,
curr_message_tokens = self.model_instance.get_llm_num_tokens(
prompt_messages
)
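The pruning loop drops the oldest message and recounts until the history fits the limit. A condensed stand-alone sketch, with count_tokens standing in for model_instance.get_llm_num_tokens:

    def prune_to_limit(count_tokens, prompt_messages: list, max_token_limit: int) -> list:
        pruned_memory = []
        curr_message_tokens = count_tokens(prompt_messages)
        while curr_message_tokens > max_token_limit and prompt_messages:
            pruned_memory.append(prompt_messages.pop(0))  # drop the oldest first
            curr_message_tokens = count_tokens(prompt_messages)
        return pruned_memory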

View File

@@ -1,7 +1,10 @@
import logging
import os
from collections.abc import Generator
from typing import IO, Optional, Union, cast
from core.entities.provider_configuration import ProviderModelBundle
from core.entities.provider_configuration import ProviderConfiguration, ProviderModelBundle
from core.entities.provider_entities import ModelLoadBalancingConfiguration
from core.errors.error import ProviderTokenNotInitError
from core.model_runtime.callbacks.base_callback import Callback
from core.model_runtime.entities.llm_entities import LLMResult
@@ -9,6 +12,7 @@ from core.model_runtime.entities.message_entities import PromptMessage, PromptMe
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.entities.rerank_entities import RerankResult
from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.errors.invoke import InvokeAuthorizationError, InvokeConnectionError, InvokeRateLimitError
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
from core.model_runtime.model_providers.__base.moderation_model import ModerationModel
from core.model_runtime.model_providers.__base.rerank_model import RerankModel
@@ -16,6 +20,10 @@ from core.model_runtime.model_providers.__base.speech2text_model import Speech2T
from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel
from core.model_runtime.model_providers.__base.tts_model import TTSModel
from core.provider_manager import ProviderManager
from extensions.ext_redis import redis_client
from models.provider import ProviderType
logger = logging.getLogger(__name__)
class ModelInstance:
@@ -29,6 +37,12 @@ class ModelInstance:
self.provider = provider_model_bundle.configuration.provider.provider
self.credentials = self._fetch_credentials_from_bundle(provider_model_bundle, model)
self.model_type_instance = self.provider_model_bundle.model_type_instance
self.load_balancing_manager = self._get_load_balancing_manager(
configuration=provider_model_bundle.configuration,
model_type=provider_model_bundle.model_type_instance.model_type,
model=model,
credentials=self.credentials
)
def _fetch_credentials_from_bundle(self, provider_model_bundle: ProviderModelBundle, model: str) -> dict:
"""
@@ -37,8 +51,10 @@ class ModelInstance:
:param model: model name
:return:
"""
credentials = provider_model_bundle.configuration.get_current_credentials(
model_type=provider_model_bundle.model_type_instance.model_type,
configuration = provider_model_bundle.configuration
model_type = provider_model_bundle.model_type_instance.model_type
credentials = configuration.get_current_credentials(
model_type=model_type,
model=model
)
@@ -47,6 +63,43 @@ class ModelInstance:
return credentials
def _get_load_balancing_manager(self, configuration: ProviderConfiguration,
model_type: ModelType,
model: str,
credentials: dict) -> Optional["LBModelManager"]:
"""
Get the load balancing manager for the model, if load balancing is configured
:param configuration: provider configuration
:param model_type: model type
:param model: model name
:param credentials: model credentials
:return:
"""
if configuration.model_settings and configuration.using_provider_type == ProviderType.CUSTOM:
current_model_setting = None
# check if model is disabled by admin
for model_setting in configuration.model_settings:
if (model_setting.model_type == model_type
and model_setting.model == model):
current_model_setting = model_setting
break
# check if load balancing is enabled
if current_model_setting and current_model_setting.load_balancing_configs:
# use load balancing proxy to choose credentials
lb_model_manager = LBModelManager(
tenant_id=configuration.tenant_id,
provider=configuration.provider.provider,
model_type=model_type,
model=model,
load_balancing_configs=current_model_setting.load_balancing_configs,
managed_credentials=credentials if configuration.custom_configuration.provider else None
)
return lb_model_manager
return None
def invoke_llm(self, prompt_messages: list[PromptMessage], model_parameters: Optional[dict] = None,
tools: Optional[list[PromptMessageTool]] = None, stop: Optional[list[str]] = None,
stream: bool = True, user: Optional[str] = None, callbacks: list[Callback] = None) \
@@ -67,7 +120,8 @@ class ModelInstance:
raise Exception("Model type instance is not LargeLanguageModel")
self.model_type_instance = cast(LargeLanguageModel, self.model_type_instance)
return self.model_type_instance.invoke(
return self._round_robin_invoke(
function=self.model_type_instance.invoke,
model=self.model,
credentials=self.credentials,
prompt_messages=prompt_messages,
@@ -79,6 +133,27 @@ class ModelInstance:
callbacks=callbacks
)
def get_llm_num_tokens(self, prompt_messages: list[PromptMessage],
tools: Optional[list[PromptMessageTool]] = None) -> int:
"""
Get number of tokens for LLM
:param prompt_messages: prompt messages
:param tools: tools for tool calling
:return:
"""
if not isinstance(self.model_type_instance, LargeLanguageModel):
raise Exception("Model type instance is not LargeLanguageModel")
self.model_type_instance = cast(LargeLanguageModel, self.model_type_instance)
return self._round_robin_invoke(
function=self.model_type_instance.get_num_tokens,
model=self.model,
credentials=self.credentials,
prompt_messages=prompt_messages,
tools=tools
)
def invoke_text_embedding(self, texts: list[str], user: Optional[str] = None) \
-> TextEmbeddingResult:
"""
@@ -92,13 +167,32 @@ class ModelInstance:
raise Exception("Model type instance is not TextEmbeddingModel")
self.model_type_instance = cast(TextEmbeddingModel, self.model_type_instance)
return self.model_type_instance.invoke(
return self._round_robin_invoke(
function=self.model_type_instance.invoke,
model=self.model,
credentials=self.credentials,
texts=texts,
user=user
)
def get_text_embedding_num_tokens(self, texts: list[str]) -> int:
"""
Get number of tokens for text embedding
:param texts: texts to embed
:return:
"""
if not isinstance(self.model_type_instance, TextEmbeddingModel):
raise Exception("Model type instance is not TextEmbeddingModel")
self.model_type_instance = cast(TextEmbeddingModel, self.model_type_instance)
return self._round_robin_invoke(
function=self.model_type_instance.get_num_tokens,
model=self.model,
credentials=self.credentials,
texts=texts
)
def invoke_rerank(self, query: str, docs: list[str], score_threshold: Optional[float] = None,
top_n: Optional[int] = None,
user: Optional[str] = None) \
@@ -117,7 +211,8 @@ class ModelInstance:
raise Exception("Model type instance is not RerankModel")
self.model_type_instance = cast(RerankModel, self.model_type_instance)
return self.model_type_instance.invoke(
return self._round_robin_invoke(
function=self.model_type_instance.invoke,
model=self.model,
credentials=self.credentials,
query=query,
@@ -140,7 +235,8 @@ class ModelInstance:
raise Exception("Model type instance is not ModerationModel")
self.model_type_instance = cast(ModerationModel, self.model_type_instance)
return self.model_type_instance.invoke(
return self._round_robin_invoke(
function=self.model_type_instance.invoke,
model=self.model,
credentials=self.credentials,
text=text,
@@ -160,7 +256,8 @@ class ModelInstance:
raise Exception("Model type instance is not Speech2TextModel")
self.model_type_instance = cast(Speech2TextModel, self.model_type_instance)
return self.model_type_instance.invoke(
return self._round_robin_invoke(
function=self.model_type_instance.invoke,
model=self.model,
credentials=self.credentials,
file=file,
@@ -183,7 +280,8 @@ class ModelInstance:
raise Exception("Model type instance is not TTSModel")
self.model_type_instance = cast(TTSModel, self.model_type_instance)
return self.model_type_instance.invoke(
return self._round_robin_invoke(
function=self.model_type_instance.invoke,
model=self.model,
credentials=self.credentials,
content_text=content_text,
@@ -193,6 +291,43 @@ class ModelInstance:
streaming=streaming
)
def _round_robin_invoke(self, function: callable, *args, **kwargs):
"""
Round-robin invoke
:param function: function to invoke
:param args: function args
:param kwargs: function kwargs
:return:
"""
if not self.load_balancing_manager:
return function(*args, **kwargs)
last_exception = None
while True:
lb_config = self.load_balancing_manager.fetch_next()
if not lb_config:
if not last_exception:
raise ProviderTokenNotInitError("Model credentials are not initialized.")
else:
raise last_exception
try:
if 'credentials' in kwargs:
del kwargs['credentials']
return function(*args, **kwargs, credentials=lb_config.credentials)
except InvokeRateLimitError as e:
# expire in 60 seconds
self.load_balancing_manager.cooldown(lb_config, expire=60)
last_exception = e
continue
except (InvokeAuthorizationError, InvokeConnectionError) as e:
# expire in 10 seconds
self.load_balancing_manager.cooldown(lb_config, expire=10)
last_exception = e
continue
except Exception as e:
raise e
def get_tts_voices(self, language: str) -> list:
"""
Get TTS model voices for the given language
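_round_robin_invoke above is the heart of the change: with a load-balancing manager present it fetches the next credential set, cools down configs that hit rate limits (60s) or auth/connection errors (10s) and retries, and re-raises anything else. A simplified self-contained sketch of that control flow, with in-memory stand-ins for Redis and Dify's error types:

    import time
    from typing import Callable, Optional

    class RateLimitError(Exception):  # stand-in for InvokeRateLimitError
        pass

    class LBConfig:
        def __init__(self, name: str, credentials: dict):
            self.name, self.credentials = name, credentials

    class TinyLBManager:
        """In-memory stand-in for LBModelManager: round robin plus cooldown."""
        def __init__(self, configs: list[LBConfig]):
            self._configs = configs
            self._index = 0
            self._cooldown_until: dict[str, float] = {}

        def fetch_next(self) -> Optional[LBConfig]:
            for _ in range(len(self._configs)):
                config = self._configs[self._index % len(self._configs)]
                self._index += 1
                if self._cooldown_until.get(config.name, 0.0) <= time.time():
                    return config
            return None  # every config is cooling down

        def cooldown(self, config: LBConfig, expire: int) -> None:
            self._cooldown_until[config.name] = time.time() + expire

    def round_robin_invoke(manager: TinyLBManager, function: Callable, **kwargs):
        last_exception: Optional[Exception] = None
        while True:
            config = manager.fetch_next()
            if config is None:
                raise last_exception or RuntimeError("no credentials available")
            try:
                kwargs.pop("credentials", None)  # always use the chosen config
                return function(credentials=config.credentials, **kwargs)
            except RateLimitError as e:
                manager.cooldown(config, expire=60)  # mirrors the 60s cooldown
                last_exception = e

    manager = TinyLBManager([LBConfig("key-a", {"k": "a"}), LBConfig("key-b", {"k": "b"})])

    def flaky(credentials: dict) -> str:
        if credentials["k"] == "a":
            raise RateLimitError("429")
        return f"ok via {credentials['k']}"

    print(round_robin_invoke(manager, flaky))  # ok via b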
@@ -226,6 +361,7 @@ class ModelManager:
"""
if not provider:
return self.get_default_model_instance(tenant_id, model_type)
provider_model_bundle = self._provider_manager.get_provider_model_bundle(
tenant_id=tenant_id,
provider=provider,
@@ -255,3 +391,141 @@ class ModelManager:
model_type=model_type,
model=default_model_entity.model
)
class LBModelManager:
def __init__(self, tenant_id: str,
provider: str,
model_type: ModelType,
model: str,
load_balancing_configs: list[ModelLoadBalancingConfiguration],
managed_credentials: Optional[dict] = None) -> None:
"""
Load balancing model manager
:param load_balancing_configs: all load balancing configurations
:param managed_credentials: managed credentials, used when a load balancing configuration is named __inherit__
"""
self._tenant_id = tenant_id
self._provider = provider
self._model_type = model_type
self._model = model
self._load_balancing_configs = load_balancing_configs
for load_balancing_config in list(self._load_balancing_configs):
if load_balancing_config.name == "__inherit__":
if not managed_credentials:
# remove __inherit__ if managed credentials are not provided
# (iterate over a copy so removal does not skip entries)
self._load_balancing_configs.remove(load_balancing_config)
else:
load_balancing_config.credentials = managed_credentials
def fetch_next(self) -> Optional[ModelLoadBalancingConfiguration]:
"""
Get next model load balancing config
Strategy: Round Robin
:return:
"""
cache_key = "model_lb_index:{}:{}:{}:{}".format(
self._tenant_id,
self._provider,
self._model_type.value,
self._model
)
cooldown_load_balancing_configs = []
max_index = len(self._load_balancing_configs)
while True:
current_index = redis_client.incr(cache_key)
if current_index >= 10000000:
current_index = 1
redis_client.set(cache_key, current_index)
redis_client.expire(cache_key, 3600)
if current_index > max_index:
current_index = current_index % max_index
real_index = current_index - 1
if real_index > max_index:
real_index = 0
config = self._load_balancing_configs[real_index]
if self.in_cooldown(config):
cooldown_load_balancing_configs.append(config)
if len(cooldown_load_balancing_configs) >= len(self._load_balancing_configs):
# all configs are in cooldown
return None
continue
if bool(os.environ.get("DEBUG", 'False').lower() == 'true'):
logger.info(f"Model LB\nid: {config.id}\nname: {config.name}\n"
f"tenant_id: {self._tenant_id}\nprovider: {self._provider}\n"
f"model_type: {self._model_type.value}\nmodel: {self._model}")
return config
return None
def cooldown(self, config: ModelLoadBalancingConfiguration, expire: int = 60) -> None:
"""
Cooldown model load balancing config
:param config: model load balancing config
:param expire: cooldown time in seconds
:return:
"""
cooldown_cache_key = "model_lb_index:cooldown:{}:{}:{}:{}:{}".format(
self._tenant_id,
self._provider,
self._model_type.value,
self._model,
config.id
)
redis_client.setex(cooldown_cache_key, expire, 'true')
def in_cooldown(self, config: ModelLoadBalancingConfiguration) -> bool:
"""
Check if model load balancing config is in cooldown
:param config: model load balancing config
:return:
"""
cooldown_cache_key = "model_lb_index:cooldown:{}:{}:{}:{}:{}".format(
self._tenant_id,
self._provider,
self._model_type.value,
self._model,
config.id
)
return redis_client.exists(cooldown_cache_key)
@classmethod
def get_config_in_cooldown_and_ttl(cls, tenant_id: str,
provider: str,
model_type: ModelType,
model: str,
config_id: str) -> tuple[bool, int]:
"""
Get whether the model load balancing config is in cooldown, and the remaining TTL
:param tenant_id: workspace id
:param provider: provider name
:param model_type: model type
:param model: model name
:param config_id: model load balancing config id
:return:
"""
cooldown_cache_key = "model_lb_index:cooldown:{}:{}:{}:{}:{}".format(
tenant_id,
provider,
model_type.value,
model,
config_id
)
ttl = redis_client.ttl(cooldown_cache_key)
if ttl == -2:
return False, 0
return True, ttl
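Two key families drive the manager above: a rotating index key, INCRed atomically so concurrent workers share one rotation, and per-config cooldown keys written with SETEX so they expire on their own; ttl() returning -2 signals the cooldown has lapsed. A small sketch of the scheme with illustrative values:

    def lb_index_key(tenant_id: str, provider: str, model_type: str, model: str) -> str:
        # INCRed on every fetch; reset once it grows past 10,000,000.
        return f"model_lb_index:{tenant_id}:{provider}:{model_type}:{model}"

    def lb_cooldown_key(tenant_id: str, provider: str, model_type: str,
                        model: str, config_id: str) -> str:
        # SETEX'd for the cooldown window; key existence means "in cooldown".
        return f"model_lb_index:cooldown:{tenant_id}:{provider}:{model_type}:{model}:{config_id}"

    print(lb_index_key("tenant-1", "openai", "llm", "gpt-4"))
    # model_lb_index:tenant-1:openai:llm:gpt-4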

View File

@@ -3,6 +3,7 @@ import os
from abc import ABC, abstractmethod
from typing import Optional
from core.helper.position_helper import get_position_map, sort_by_position_map
from core.model_runtime.entities.common_entities import I18nObject
from core.model_runtime.entities.defaults import PARAMETER_RULE_TEMPLATE
from core.model_runtime.entities.model_entities import (
@@ -17,7 +18,6 @@ from core.model_runtime.entities.model_entities import (
from core.model_runtime.errors.invoke import InvokeAuthorizationError, InvokeError
from core.model_runtime.model_providers.__base.tokenizers.gpt2_tokenzier import GPT2Tokenizer
from core.tools.utils.yaml_utils import load_yaml_file
from core.utils.position_helper import get_position_map, sort_by_position_map
class AIModel(ABC):

View File

@@ -1,11 +1,11 @@
import os
from abc import ABC, abstractmethod
from core.helper.module_import_helper import get_subclasses_from_module, import_module_from_source
from core.model_runtime.entities.model_entities import AIModelEntity, ModelType
from core.model_runtime.entities.provider_entities import ProviderEntity
from core.model_runtime.model_providers.__base.ai_model import AIModel
from core.tools.utils.yaml_utils import load_yaml_file
from core.utils.module_import_helper import get_subclasses_from_module, import_module_from_source
class ModelProvider(ABC):

View File

@@ -4,13 +4,13 @@ from typing import Optional
from pydantic import BaseModel
from core.helper.module_import_helper import load_single_subclass_from_source
from core.helper.position_helper import get_position_map, sort_to_dict_by_position_map
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.entities.provider_entities import ProviderConfig, ProviderEntity, SimpleProviderEntity
from core.model_runtime.model_providers.__base.model_provider import ModelProvider
from core.model_runtime.schema_validators.model_credential_schema_validator import ModelCredentialSchemaValidator
from core.model_runtime.schema_validators.provider_credential_schema_validator import ProviderCredentialSchemaValidator
from core.utils.module_import_helper import load_single_subclass_from_source
from core.utils.position_helper import get_position_map, sort_to_dict_by_position_map
logger = logging.getLogger(__name__)

View File

@@ -1,10 +1,10 @@
from typing import Optional, cast
from typing import Optional
from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_manager import ModelInstance
from core.model_runtime.entities.message_entities import PromptMessage
from core.model_runtime.entities.model_entities import ModelPropertyKey
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
from core.prompt.entities.advanced_prompt_entities import MemoryConfig
@@ -25,12 +25,12 @@ class PromptTransform:
model_context_tokens = model_config.model_schema.model_properties.get(ModelPropertyKey.CONTEXT_SIZE)
if model_context_tokens:
model_type_instance = model_config.provider_model_bundle.model_type_instance
model_type_instance = cast(LargeLanguageModel, model_type_instance)
model_instance = ModelInstance(
provider_model_bundle=model_config.provider_model_bundle,
model=model_config.model
)
curr_message_tokens = model_type_instance.get_num_tokens(
model_config.model,
model_config.credentials,
curr_message_tokens = model_instance.get_llm_num_tokens(
prompt_messages
)

View File

@@ -11,6 +11,8 @@ from core.entities.provider_entities import (
CustomConfiguration,
CustomModelConfiguration,
CustomProviderConfiguration,
ModelLoadBalancingConfiguration,
ModelSettings,
QuotaConfiguration,
SystemConfiguration,
)
@@ -26,13 +28,16 @@ from core.model_runtime.model_providers import model_provider_factory
from extensions import ext_hosting_provider
from extensions.ext_database import db
from models.provider import (
LoadBalancingModelConfig,
Provider,
ProviderModel,
ProviderModelSetting,
ProviderQuotaType,
ProviderType,
TenantDefaultModel,
TenantPreferredModelProvider,
)
from services.feature_service import FeatureService
class ProviderManager:
@@ -98,6 +103,13 @@ class ProviderManager:
# Get all preferred provider types of the workspace
provider_name_to_preferred_model_provider_records_dict = self._get_all_preferred_model_providers(tenant_id)
# Get all provider model settings
provider_name_to_provider_model_settings_dict = self._get_all_provider_model_settings(tenant_id)
# Get all load balancing configs
provider_name_to_provider_load_balancing_model_configs_dict \
= self._get_all_provider_load_balancing_configs(tenant_id)
provider_configurations = ProviderConfigurations(
tenant_id=tenant_id
)
@@ -147,13 +159,28 @@ class ProviderManager:
if system_configuration.enabled and has_valid_quota:
using_provider_type = ProviderType.SYSTEM
# Get provider model settings
provider_model_settings = provider_name_to_provider_model_settings_dict.get(provider_name)
# Get provider load balancing configs
provider_load_balancing_configs \
= provider_name_to_provider_load_balancing_model_configs_dict.get(provider_name)
# Convert to model settings
model_settings = self._to_model_settings(
provider_entity=provider_entity,
provider_model_settings=provider_model_settings,
load_balancing_model_configs=provider_load_balancing_configs
)
provider_configuration = ProviderConfiguration(
tenant_id=tenant_id,
provider=provider_entity,
preferred_provider_type=preferred_provider_type,
using_provider_type=using_provider_type,
system_configuration=system_configuration,
custom_configuration=custom_configuration
custom_configuration=custom_configuration,
model_settings=model_settings
)
provider_configurations[provider_name] = provider_configuration
@@ -338,7 +365,7 @@ class ProviderManager:
"""
Get all preferred provider types of the workspace.
:param tenant_id:
:param tenant_id: workspace id
:return:
"""
preferred_provider_types = db.session.query(TenantPreferredModelProvider) \
@@ -353,6 +380,48 @@ class ProviderManager:
return provider_name_to_preferred_provider_type_records_dict
def _get_all_provider_model_settings(self, tenant_id: str) -> dict[str, list[ProviderModelSetting]]:
"""
Get all provider model settings of the workspace.
:param tenant_id: workspace id
:return:
"""
provider_model_settings = db.session.query(ProviderModelSetting) \
.filter(
ProviderModelSetting.tenant_id == tenant_id
).all()
provider_name_to_provider_model_settings_dict = defaultdict(list)
for provider_model_setting in provider_model_settings:
(provider_name_to_provider_model_settings_dict[provider_model_setting.provider_name]
.append(provider_model_setting))
return provider_name_to_provider_model_settings_dict
def _get_all_provider_load_balancing_configs(self, tenant_id: str) -> dict[str, list[LoadBalancingModelConfig]]:
"""
Get all provider load balancing configs of the workspace.
:param tenant_id: workspace id
:return:
"""
model_load_balancing_enabled = FeatureService.get_features(tenant_id).model_load_balancing_enabled
if not model_load_balancing_enabled:
return dict()
provider_load_balancing_configs = db.session.query(LoadBalancingModelConfig) \
.filter(
LoadBalancingModelConfig.tenant_id == tenant_id
).all()
provider_name_to_provider_load_balancing_model_configs_dict = defaultdict(list)
for provider_load_balancing_config in provider_load_balancing_configs:
(provider_name_to_provider_load_balancing_model_configs_dict[provider_load_balancing_config.provider_name]
.append(provider_load_balancing_config))
return provider_name_to_provider_load_balancing_model_configs_dict
def _init_trial_provider_records(self, tenant_id: str,
provider_name_to_provider_records_dict: dict[str, list]) -> dict[str, list]:
"""
@@ -726,3 +795,97 @@ class ProviderManager:
secret_input_form_variables.append(credential_form_schema.variable)
return secret_input_form_variables
def _to_model_settings(self, provider_entity: ProviderEntity,
provider_model_settings: Optional[list[ProviderModelSetting]] = None,
load_balancing_model_configs: Optional[list[LoadBalancingModelConfig]] = None) \
-> list[ModelSettings]:
"""
Convert to model settings.
:param provider_model_settings: provider model settings, including the enabled and load balancing enabled flags
:param load_balancing_model_configs: load balancing model configs
:return:
"""
# Get provider model credential secret variables
model_credential_secret_variables = self._extract_secret_variables(
provider_entity.model_credential_schema.credential_form_schemas
if provider_entity.model_credential_schema else []
)
model_settings = []
if not provider_model_settings:
return model_settings
for provider_model_setting in provider_model_settings:
load_balancing_configs = []
if provider_model_setting.load_balancing_enabled and load_balancing_model_configs:
for load_balancing_model_config in load_balancing_model_configs:
if (load_balancing_model_config.model_name == provider_model_setting.model_name
and load_balancing_model_config.model_type == provider_model_setting.model_type):
if not load_balancing_model_config.enabled:
continue
if not load_balancing_model_config.encrypted_config:
if load_balancing_model_config.name == "__inherit__":
load_balancing_configs.append(ModelLoadBalancingConfiguration(
id=load_balancing_model_config.id,
name=load_balancing_model_config.name,
credentials={}
))
continue
provider_model_credentials_cache = ProviderCredentialsCache(
tenant_id=load_balancing_model_config.tenant_id,
identity_id=load_balancing_model_config.id,
cache_type=ProviderCredentialsCacheType.LOAD_BALANCING_MODEL
)
# Get cached provider model credentials
cached_provider_model_credentials = provider_model_credentials_cache.get()
if not cached_provider_model_credentials:
try:
provider_model_credentials = json.loads(load_balancing_model_config.encrypted_config)
except JSONDecodeError:
continue
# Get decoding rsa key and cipher for decrypting credentials
if self.decoding_rsa_key is None or self.decoding_cipher_rsa is None:
self.decoding_rsa_key, self.decoding_cipher_rsa = encrypter.get_decrypt_decoding(
load_balancing_model_config.tenant_id)
for variable in model_credential_secret_variables:
if variable in provider_model_credentials:
try:
provider_model_credentials[variable] = encrypter.decrypt_token_with_decoding(
provider_model_credentials.get(variable),
self.decoding_rsa_key,
self.decoding_cipher_rsa
)
except ValueError:
pass
# cache provider model credentials
provider_model_credentials_cache.set(
credentials=provider_model_credentials
)
else:
provider_model_credentials = cached_provider_model_credentials
load_balancing_configs.append(ModelLoadBalancingConfiguration(
id=load_balancing_model_config.id,
name=load_balancing_model_config.name,
credentials=provider_model_credentials
))
model_settings.append(
ModelSettings(
model=provider_model_setting.model_name,
model_type=ModelType.value_of(provider_model_setting.model_type),
enabled=provider_model_setting.enabled,
load_balancing_configs=load_balancing_configs if len(load_balancing_configs) > 1 else []
)
)
return model_settings
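Note the final guard in _to_model_settings: load-balancing configs are attached only when more than one usable entry survives filtering (disabled rows and undecodable encrypted configs are skipped); a single survivor is treated as no load balancing at all. A condensed sketch of that rule, with dicts standing in for the ORM rows:

    def effective_lb_configs(rows: list[dict]) -> list[dict]:
        # rows stand in for LoadBalancingModelConfig records
        usable = [r for r in rows if r.get("enabled")]
        # "__inherit__" with no stored credentials stays usable: it borrows
        # the provider-level managed credentials at invoke time.
        return usable if len(usable) > 1 else []

    print(effective_lb_configs([{"name": "key-a", "enabled": True}]))  # []
    print(effective_lb_configs([
        {"name": "key-a", "enabled": True},
        {"name": "__inherit__", "enabled": True},
    ]))  # both kept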

View File

@@ -1,11 +1,10 @@
from collections.abc import Sequence
from typing import Any, Optional, cast
from typing import Any, Optional
from sqlalchemy import func
from core.model_manager import ModelManager
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel
from core.rag.models.document import Document
from extensions.ext_database import db
from models.dataset import Dataset, DocumentSegment
@@ -95,11 +94,7 @@ class DatasetDocumentStore:
# calc embedding use tokens
if embedding_model:
model_type_instance = embedding_model.model_type_instance
model_type_instance = cast(TextEmbeddingModel, model_type_instance)
tokens = model_type_instance.get_num_tokens(
model=embedding_model.model,
credentials=embedding_model.credentials,
tokens = embedding_model.get_text_embedding_num_tokens(
texts=[doc.page_content]
)
else:

View File

@@ -1,10 +1,9 @@
"""Functionality for splitting text."""
from __future__ import annotations
from typing import Any, Optional, cast
from typing import Any, Optional
from core.model_manager import ModelInstance
from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel
from core.model_runtime.model_providers.__base.tokenizers.gpt2_tokenzier import GPT2Tokenizer
from core.rag.splitter.text_splitter import (
TS,
@@ -35,11 +34,7 @@ class EnhanceRecursiveCharacterTextSplitter(RecursiveCharacterTextSplitter):
return 0
if embedding_model_instance:
embedding_model_type_instance = embedding_model_instance.model_type_instance
embedding_model_type_instance = cast(TextEmbeddingModel, embedding_model_type_instance)
return embedding_model_type_instance.get_num_tokens(
model=embedding_model_instance.model,
credentials=embedding_model_instance.credentials,
return embedding_model_instance.get_text_embedding_num_tokens(
texts=[text]
)
else:

View File

@@ -1,7 +1,7 @@
import os.path
from core.helper.position_helper import get_position_map, sort_by_position_map
from core.tools.entities.api_entities import UserToolProvider
from core.utils.position_helper import get_position_map, sort_by_position_map
class BuiltinToolProviderSort:

View File

@@ -2,6 +2,7 @@ from abc import abstractmethod
from os import listdir, path
from typing import Any
from core.helper.module_import_helper import load_single_subclass_from_source
from core.tools.entities.tool_entities import ToolParameter, ToolProviderCredentials, ToolProviderType
from core.tools.entities.values import ToolLabelEnum, default_tool_label_dict
from core.tools.errors import (
@@ -14,7 +15,6 @@ from core.tools.tool.builtin_tool import BuiltinTool
from core.tools.tool.tool import Tool
from core.tools.utils.tool_parameter_converter import ToolParameterConverter
from core.tools.utils.yaml_utils import load_yaml_file
from core.utils.module_import_helper import load_single_subclass_from_source
class BuiltinToolProviderController(ToolProviderController):
@@ -82,7 +82,7 @@ class BuiltinToolProviderController(ToolProviderController):
return {}
return self.credentials_schema.copy()
def get_tools(self) -> list[Tool]:
"""
returns a list of tools that the provider can provide
@@ -127,7 +127,7 @@ class BuiltinToolProviderController(ToolProviderController):
:return: type of the provider
"""
return ToolProviderType.BUILT_IN
@property
def tool_labels(self) -> list[str]:
"""
@@ -137,7 +137,7 @@ class BuiltinToolProviderController(ToolProviderController):
"""
label_enums = self._get_tool_labels()
return [default_tool_label_dict[label].name for label in label_enums]
def _get_tool_labels(self) -> list[ToolLabelEnum]:
"""
returns the labels of the provider

View File

@@ -10,6 +10,7 @@ from flask import current_app
from core.agent.entities import AgentToolEntity
from core.app.entities.app_invoke_entities import InvokeFrom
from core.helper.module_import_helper import load_single_subclass_from_source
from core.model_runtime.utils.encoders import jsonable_encoder
from core.tools.entities.api_entities import UserToolProvider, UserToolProviderTypeLiteral
from core.tools.entities.common_entities import I18nObject
@@ -31,7 +32,6 @@ from core.tools.utils.configuration import (
ToolParameterConfigurationManager,
)
from core.tools.utils.tool_parameter_converter import ToolParameterConverter
from core.utils.module_import_helper import load_single_subclass_from_source
from core.workflow.nodes.tool.entities import ToolEntity
from extensions.ext_database import db
from models.tools import ApiToolProvider, BuiltinToolProvider, WorkflowToolProvider
@@ -102,10 +102,10 @@ class ToolManager:
raise ToolProviderNotFoundError(f'provider type {provider_type} not found')
@classmethod
def get_tool_runtime(cls, provider_type: str,
def get_tool_runtime(cls, provider_type: str,
provider_id: str,
tool_name: str,
tenant_id: str,
tool_name: str,
tenant_id: str,
invoke_from: InvokeFrom = InvokeFrom.DEBUGGER,
tool_invoke_from: ToolInvokeFrom = ToolInvokeFrom.AGENT) \
-> Union[BuiltinTool, ApiTool]:
@@ -222,7 +222,7 @@ class ToolManager:
get the agent tool runtime
"""
tool_entity = cls.get_tool_runtime(
provider_type=agent_tool.provider_type,
provider_type=agent_tool.provider_type,
provider_id=agent_tool.provider_id,
tool_name=agent_tool.tool_name,
tenant_id=tenant_id,
@@ -235,7 +235,7 @@ class ToolManager:
# check file types
if parameter.type == ToolParameter.ToolParameterType.FILE:
raise ValueError(f"file type parameter {parameter.name} not supported in agent")
if parameter.form == ToolParameter.ToolParameterForm.FORM:
# save tool parameter to tool entity memory
value = cls._init_runtime_parameter(parameter, agent_tool.tool_parameters)
@@ -403,7 +403,7 @@ class ToolManager:
# get builtin providers
builtin_providers = cls.list_builtin_providers()
# get db builtin providers
db_builtin_providers: list[BuiltinToolProvider] = db.session.query(BuiltinToolProvider). \
filter(BuiltinToolProvider.tenant_id == tenant_id).all()
@@ -428,7 +428,7 @@ class ToolManager:
if 'api' in filters:
db_api_providers: list[ApiToolProvider] = db.session.query(ApiToolProvider). \
filter(ApiToolProvider.tenant_id == tenant_id).all()
api_provider_controllers = [{
'provider': provider,
'controller': ToolTransformService.api_provider_to_controller(provider)
@@ -450,7 +450,7 @@ class ToolManager:
# get workflow providers
workflow_providers: list[WorkflowToolProvider] = db.session.query(WorkflowToolProvider). \
filter(WorkflowToolProvider.tenant_id == tenant_id).all()
workflow_provider_controllers = []
for provider in workflow_providers:
try:
@@ -460,7 +460,7 @@ class ToolManager:
except Exception as e:
# app has been deleted
pass
labels = ToolLabelManager.get_tools_labels(workflow_provider_controllers)
for provider_controller in workflow_provider_controllers:

View File

@@ -73,10 +73,8 @@ class ModelInvocationUtils:
if not model_instance:
raise InvokeModelError('Model not found')
llm_model = cast(LargeLanguageModel, model_instance.model_type_instance)
# get tokens
tokens = llm_model.get_num_tokens(model_instance.model, model_instance.credentials, prompt_messages)
tokens = model_instance.get_llm_num_tokens(prompt_messages)
return tokens
@@ -108,13 +106,8 @@ class ModelInvocationUtils:
tenant_id=tenant_id, model_type=ModelType.LLM,
)
llm_model = cast(LargeLanguageModel, model_instance.model_type_instance)
# get model credentials
model_credentials = model_instance.credentials
# get prompt tokens
prompt_tokens = llm_model.get_num_tokens(model_instance.model, model_credentials, prompt_messages)
prompt_tokens = model_instance.get_llm_num_tokens(prompt_messages)
model_parameters = {
'temperature': 0.8,
@@ -144,9 +137,7 @@ class ModelInvocationUtils:
db.session.commit()
try:
response: LLMResult = llm_model.invoke(
model=model_instance.model,
credentials=model_credentials,
response: LLMResult = model_instance.invoke_llm(
prompt_messages=prompt_messages,
model_parameters=model_parameters,
tools=[], stop=[], stream=False, user=user_id, callbacks=[]
@@ -176,4 +167,4 @@ class ModelInvocationUtils:
db.session.commit()
return response
return response
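With this change the tool-side utility routes both token counting and the actual call through ModelInstance, so load balancing applies to tool-internal LLM calls as well. A hedged usage sketch with the parameters shown in the diff; model_instance is assumed to come from ModelManager.get_model_instance(...):

    prompt_tokens = model_instance.get_llm_num_tokens(prompt_messages)

    response = model_instance.invoke_llm(
        prompt_messages=prompt_messages,
        model_parameters={'temperature': 0.8},
        tools=[], stop=[], stream=False, user=user_id, callbacks=[],
    )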

View File

@@ -4,9 +4,9 @@ from typing import Optional, Union, cast
from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_manager import ModelInstance
from core.model_runtime.entities.message_entities import PromptMessage, PromptMessageRole
from core.model_runtime.entities.model_entities import ModelPropertyKey
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
from core.model_runtime.utils.encoders import jsonable_encoder
from core.prompt.advanced_prompt_transform import AdvancedPromptTransform
from core.prompt.entities.advanced_prompt_entities import ChatModelMessage, CompletionModelPromptTemplate
@@ -200,12 +200,12 @@ class QuestionClassifierNode(LLMNode):
model_context_tokens = model_config.model_schema.model_properties.get(ModelPropertyKey.CONTEXT_SIZE)
if model_context_tokens:
model_type_instance = model_config.provider_model_bundle.model_type_instance
model_type_instance = cast(LargeLanguageModel, model_type_instance)
model_instance = ModelInstance(
provider_model_bundle=model_config.provider_model_bundle,
model=model_config.model
)
curr_message_tokens = model_type_instance.get_num_tokens(
model_config.model,
model_config.credentials,
curr_message_tokens = model_instance.get_llm_num_tokens(
prompt_messages
)