Model Runtime (#1858)

Co-authored-by: StyleZhang <jasonapring2015@outlook.com>
Co-authored-by: Garfield Dai <dai.hai@foxmail.com>
Co-authored-by: chenhe <guchenhe@gmail.com>
Co-authored-by: jyong <jyong@dify.ai>
Co-authored-by: Joel <iamjoel007@gmail.com>
Co-authored-by: Yeuoly <admin@srmxy.cn>
382  api/tests/integration_tests/model_runtime/openai/test_llm.py  (new file)
File diff suppressed because one or more lines are too long
@@ -0,0 +1,55 @@
import os

import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.openai.moderation.moderation import OpenAIModerationModel
from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock


@pytest.mark.parametrize('setup_openai_mock', [['moderation']], indirect=True)
def test_validate_credentials(setup_openai_mock):
    model = OpenAIModerationModel()

    with pytest.raises(CredentialsValidateFailedError):
        model.validate_credentials(
            model='text-moderation-stable',
            credentials={
                'openai_api_key': 'invalid_key'
            }
        )

    model.validate_credentials(
        model='text-moderation-stable',
        credentials={
            'openai_api_key': os.environ.get('OPENAI_API_KEY')
        }
    )


@pytest.mark.parametrize('setup_openai_mock', [['moderation']], indirect=True)
def test_invoke_model(setup_openai_mock):
    model = OpenAIModerationModel()

    result = model.invoke(
        model='text-moderation-stable',
        credentials={
            'openai_api_key': os.environ.get('OPENAI_API_KEY')
        },
        text="hello",
        user="abc-123"
    )

    assert isinstance(result, bool)
    assert result is False

    result = model.invoke(
        model='text-moderation-stable',
        credentials={
            'openai_api_key': os.environ.get('OPENAI_API_KEY')
        },
        text="i will kill you",
        user="abc-123"
    )

    assert isinstance(result, bool)
    assert result is True
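The tests above scope the OpenAI mock through pytest's indirect parametrization: the list passed to @pytest.mark.parametrize is delivered to the setup_openai_mock fixture rather than to the test function. A minimal, hypothetical sketch of that mechanism (not the project's actual fixture, which would additionally patch the OpenAI client):

import pytest


@pytest.fixture
def setup_openai_mock(request):
    # With indirect=True, the parametrize value (e.g. ['moderation']) arrives
    # here as request.param; the real fixture would use it to decide which
    # OpenAI endpoints to patch before yielding to the test.
    yield request.param


@pytest.mark.parametrize('setup_openai_mock', [['moderation']], indirect=True)
def test_fixture_receives_param(setup_openai_mock):
    assert setup_openai_mock == ['moderation']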
@@ -0,0 +1,23 @@
import os

import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.openai.openai import OpenAIProvider
from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock


@pytest.mark.parametrize('setup_openai_mock', [['chat']], indirect=True)
def test_validate_provider_credentials(setup_openai_mock):
    provider = OpenAIProvider()

    with pytest.raises(CredentialsValidateFailedError):
        provider.validate_provider_credentials(
            credentials={}
        )

    provider.validate_provider_credentials(
        credentials={
            'openai_api_key': os.environ.get('OPENAI_API_KEY')
        }
    )
@@ -0,0 +1,56 @@
import os

import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.openai.speech2text.speech2text import OpenAISpeech2TextModel
from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock


@pytest.mark.parametrize('setup_openai_mock', [['speech2text']], indirect=True)
def test_validate_credentials(setup_openai_mock):
    model = OpenAISpeech2TextModel()

    with pytest.raises(CredentialsValidateFailedError):
        model.validate_credentials(
            model='whisper-1',
            credentials={
                'openai_api_key': 'invalid_key'
            }
        )

    model.validate_credentials(
        model='whisper-1',
        credentials={
            'openai_api_key': os.environ.get('OPENAI_API_KEY')
        }
    )


@pytest.mark.parametrize('setup_openai_mock', [['speech2text']], indirect=True)
def test_invoke_model(setup_openai_mock):
    model = OpenAISpeech2TextModel()

    # Get the directory of the current file
    current_dir = os.path.dirname(os.path.abspath(__file__))

    # Get assets directory
    assets_dir = os.path.join(os.path.dirname(current_dir), 'assets')

    # Construct the path to the audio file
    audio_file_path = os.path.join(assets_dir, 'audio.mp3')

    # Open the file and keep it open while invoking the model
    with open(audio_file_path, 'rb') as audio_file:
        file = audio_file

        result = model.invoke(
            model='whisper-1',
            credentials={
                'openai_api_key': os.environ.get('OPENAI_API_KEY')
            },
            file=file,
            user="abc-123"
        )

        assert isinstance(result, str)
        assert result == '1, 2, 3, 4, 5, 6, 7, 8, 9, 10'
@@ -0,0 +1,67 @@
import os

import pytest

from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.openai.text_embedding.text_embedding import OpenAITextEmbeddingModel
from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock


@pytest.mark.parametrize('setup_openai_mock', [['text_embedding']], indirect=True)
def test_validate_credentials(setup_openai_mock):
    model = OpenAITextEmbeddingModel()

    with pytest.raises(CredentialsValidateFailedError):
        model.validate_credentials(
            model='text-embedding-ada-002',
            credentials={
                'openai_api_key': 'invalid_key'
            }
        )

    model.validate_credentials(
        model='text-embedding-ada-002',
        credentials={
            'openai_api_key': os.environ.get('OPENAI_API_KEY')
        }
    )


@pytest.mark.parametrize('setup_openai_mock', [['text_embedding']], indirect=True)
def test_invoke_model(setup_openai_mock):
    model = OpenAITextEmbeddingModel()

    result = model.invoke(
        model='text-embedding-ada-002',
        credentials={
            'openai_api_key': os.environ.get('OPENAI_API_KEY'),
            'openai_api_base': 'https://api.openai.com'
        },
        texts=[
            "hello",
            "world"
        ],
        user="abc-123"
    )

    assert isinstance(result, TextEmbeddingResult)
    assert len(result.embeddings) == 2
    assert result.usage.total_tokens == 2


def test_get_num_tokens():
    model = OpenAITextEmbeddingModel()

    num_tokens = model.get_num_tokens(
        model='text-embedding-ada-002',
        credentials={
            'openai_api_key': os.environ.get('OPENAI_API_KEY'),
            'openai_api_base': 'https://api.openai.com'
        },
        texts=[
            "hello",
            "world"
        ]
    )

    assert num_tokens == 2
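The expected counts of 2 in the embedding tests line up with single-token inputs. A small, hypothetical check, assuming the runtime counts tokens with tiktoken's encoding for this model (not confirmed by the diff):

import tiktoken

# "hello" and "world" each encode to one token, so the two texts yield two tokens total.
enc = tiktoken.encoding_for_model('text-embedding-ada-002')
assert len(enc.encode('hello')) == 1
assert len(enc.encode('world')) == 1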
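A hypothetical runner for these integration tests, assuming it is executed from the repository's api/ directory with OPENAI_API_KEY exported; the directory mirrors the path shown for test_llm.py above:

import os
import sys

import pytest

if __name__ == '__main__':
    # The credential checks above read OPENAI_API_KEY from the environment.
    assert os.environ.get('OPENAI_API_KEY'), 'export OPENAI_API_KEY before running'
    sys.exit(pytest.main(['-v', 'tests/integration_tests/model_runtime/openai/']))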