feat: server multi models support (#799)

Author: takatost
Authored: 2023-08-12 00:57:00 +08:00 (committed by GitHub)
Parent: d8b712b325
Commit: 5fa2161b05
213 changed files with 10556 additions and 2579 deletions


@@ -1,15 +1,13 @@
 import io
 from werkzeug.datastructures import FileStorage
-from core.llm.llm_builder import LLMBuilder
-from core.llm.provider.llm_provider_service import LLMProviderService
+from core.model_providers.model_factory import ModelFactory
 from services.errors.audio import NoAudioUploadedServiceError, AudioTooLargeServiceError, UnsupportedAudioTypeServiceError, ProviderNotSupportSpeechToTextServiceError
-from core.llm.whisper import Whisper
-from models.provider import ProviderName
 FILE_SIZE = 15
 FILE_SIZE_LIMIT = FILE_SIZE * 1024 * 1024
 ALLOWED_EXTENSIONS = ['mp3', 'mp4', 'mpeg', 'mpga', 'm4a', 'wav', 'webm']
 class AudioService:
     @classmethod
     def transcript(cls, tenant_id: str, file: FileStorage):
@@ -26,14 +24,12 @@ class AudioService:
         if file_size > FILE_SIZE_LIMIT:
             message = f"Audio size larger than {FILE_SIZE} mb"
             raise AudioTooLargeServiceError(message)
-        provider_name = LLMBuilder.get_default_provider(tenant_id, 'whisper-1')
-        if provider_name != ProviderName.OPENAI.value:
-            raise ProviderNotSupportSpeechToTextServiceError()
-        provider_service = LLMProviderService(tenant_id, provider_name)
+        model = ModelFactory.get_speech2text_model(
+            tenant_id=tenant_id
+        )
         buffer = io.BytesIO(file_content)
         buffer.name = 'temp.mp3'
-        return Whisper(provider_service.provider).transcribe(buffer)
+        return model.run(buffer)
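
For readers skimming the diff, here is roughly what the refactored method looks like with both hunks applied. This is a sketch assembled only from the lines shown above, not the full file: the upload and extension validation between the two hunks is elided, and the file_content/file_size assignments are assumptions (they sit in unchanged lines outside the hunks).

# Sketch: AudioService.transcript after this commit, assembled from the hunks
# above. file_content/file_size handling is assumed; validation steps that sit
# outside the shown hunks are elided.
import io

from werkzeug.datastructures import FileStorage

from core.model_providers.model_factory import ModelFactory
from services.errors.audio import AudioTooLargeServiceError

FILE_SIZE = 15
FILE_SIZE_LIMIT = FILE_SIZE * 1024 * 1024
ALLOWED_EXTENSIONS = ['mp3', 'mp4', 'mpeg', 'mpga', 'm4a', 'wav', 'webm']


class AudioService:
    @classmethod
    def transcript(cls, tenant_id: str, file: FileStorage):
        file_content = file.read()       # assumed: read the upload into memory
        file_size = len(file_content)    # assumed: input to the size check

        if file_size > FILE_SIZE_LIMIT:
            message = f"Audio size larger than {FILE_SIZE} mb"
            raise AudioTooLargeServiceError(message)

        # The factory resolves whichever speech-to-text model is configured for
        # the tenant, replacing the old hard-coded whisper-1 / OpenAI check.
        model = ModelFactory.get_speech2text_model(
            tenant_id=tenant_id
        )

        buffer = io.BytesIO(file_content)
        buffer.name = 'temp.mp3'

        return model.run(buffer)

The notable design shift is that this method no longer inspects the provider name and raises ProviderNotSupportSpeechToTextServiceError itself; model resolution is delegated to ModelFactory. The error import is kept in the new version, so that error presumably still surfaces elsewhere (for example when no speech-to-text model is configured for the tenant), but the hard-coded OpenAI-only check is gone from this code path.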