FEAT: NEW WORKFLOW ENGINE (#3160)

Co-authored-by: Joel <iamjoel007@gmail.com>
Co-authored-by: Yeuoly <admin@srmxy.cn>
Co-authored-by: JzoNg <jzongcode@gmail.com>
Co-authored-by: StyleZhang <jasonapring2015@outlook.com>
Co-authored-by: jyong <jyong@dify.ai>
Co-authored-by: nite-knite <nkCoding@gmail.com>
Co-authored-by: jyong <718720800@qq.com>
This commit is contained in:
takatost
2024-04-08 18:51:46 +08:00
committed by GitHub
parent 2fb9850af5
commit 7753ba2d37
1161 changed files with 103836 additions and 10327 deletions

View File

@@ -5,6 +5,7 @@ from werkzeug.datastructures import FileStorage
from core.model_manager import ModelManager
from core.model_runtime.entities.model_entities import ModelType
from models.model import App, AppMode, AppModelConfig
from services.errors.audio import (
AudioTooLargeServiceError,
NoAudioUploadedServiceError,
@@ -20,7 +21,21 @@ ALLOWED_EXTENSIONS = ['mp3', 'mp4', 'mpeg', 'mpga', 'm4a', 'wav', 'webm', 'amr']
class AudioService:
@classmethod
def transcript_asr(cls, tenant_id: str, file: FileStorage, end_user: Optional[str] = None):
def transcript_asr(cls, app_model: App, file: FileStorage, end_user: Optional[str] = None):
if app_model.mode in [AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value]:
workflow = app_model.workflow
if workflow is None:
raise ValueError("Speech to text is not enabled")
features_dict = workflow.features_dict
if 'speech_to_text' not in features_dict or not features_dict['speech_to_text'].get('enabled'):
raise ValueError("Speech to text is not enabled")
else:
app_model_config: AppModelConfig = app_model.app_model_config
if not app_model_config.speech_to_text_dict['enabled']:
raise ValueError("Speech to text is not enabled")
if file is None:
raise NoAudioUploadedServiceError()
@@ -37,7 +52,7 @@ class AudioService:
model_manager = ModelManager()
model_instance = model_manager.get_default_model_instance(
tenant_id=tenant_id,
tenant_id=app_model.tenant_id,
model_type=ModelType.SPEECH2TEXT
)
if model_instance is None:
@@ -49,17 +64,42 @@ class AudioService:
return {"text": model_instance.invoke_speech2text(file=buffer, user=end_user)}
@classmethod
def transcript_tts(cls, app_model: App, text: str, streaming: bool,
                   voice: Optional[str] = None, end_user: Optional[str] = None):
    """Convert *text* to speech audio using the tenant's default TTS model.

    :param app_model: the App whose configuration gates the TTS feature;
        also supplies the tenant_id used to resolve the model instance.
    :param text: text to synthesize (whitespace-stripped before invocation).
    :param streaming: whether the model should stream the audio response.
    :param voice: optional voice override; when None, falls back to the
        voice configured on the app (workflow features or app model config).
    :param end_user: optional end-user identifier forwarded to the model.
    :raises ValueError: if TTS is not enabled for this app.
    :raises ProviderNotSupportTextToSpeechServiceError: if no default TTS
        model instance is available for the tenant.
    """
    if app_model.mode in [AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value]:
        # Workflow-based apps keep feature flags on the published workflow.
        workflow = app_model.workflow
        if workflow is None:
            raise ValueError("TTS is not enabled")

        features_dict = workflow.features_dict
        if 'text_to_speech' not in features_dict or not features_dict['text_to_speech'].get('enabled'):
            raise ValueError("TTS is not enabled")

        # Explicit caller-provided voice wins over the configured one.
        voice = features_dict['text_to_speech'].get('voice') if voice is None else voice
    else:
        # Legacy (non-workflow) apps keep the flag on the app model config.
        text_to_speech_dict = app_model.app_model_config.text_to_speech_dict

        if not text_to_speech_dict.get('enabled'):
            raise ValueError("TTS is not enabled")

        voice = text_to_speech_dict.get('voice') if voice is None else voice

    model_manager = ModelManager()
    model_instance = model_manager.get_default_model_instance(
        tenant_id=app_model.tenant_id,
        model_type=ModelType.TTS
    )
    if model_instance is None:
        raise ProviderNotSupportTextToSpeechServiceError()

    # No try/except here: the previous `except Exception as e: raise e`
    # wrapper changed nothing except adding a frame to the traceback.
    return model_instance.invoke_tts(
        content_text=text.strip(),
        user=end_user,
        streaming=streaming,
        tenant_id=app_model.tenant_id,
        voice=voice
    )