Mirror of http://112.124.100.131/huang.ze/ebiz-dify-ai.git, synced 2025-12-25 02:33:00 +08:00
Co-authored-by: -LAN- <laipz8200@outlook.com>
@@ -12,17 +12,17 @@ def test_validate_credentials():
 
     with pytest.raises(CredentialsValidateFailedError):
         model.validate_credentials(
-            model='NOT IMPORTANT',
+            model="NOT IMPORTANT",
             credentials={
-                'server_url': 'ww' + os.environ.get('OPENLLM_SERVER_URL'),
-            }
+                "server_url": "ww" + os.environ.get("OPENLLM_SERVER_URL"),
+            },
         )
 
     model.validate_credentials(
-        model='NOT IMPORTANT',
+        model="NOT IMPORTANT",
         credentials={
-            'server_url': os.environ.get('OPENLLM_SERVER_URL'),
-        }
+            "server_url": os.environ.get("OPENLLM_SERVER_URL"),
+        },
     )
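The failure case above corrupts the credential by prefixing "ww" to the server URL, which presumably breaks the URL scheme, while the success case passes the same URL through unchanged. A self-contained sketch of that negative-test pattern, with a hypothetical stand-in validator (the error class and the scheme check below are illustrative, not Dify's actual implementation):

import os

import pytest


class CredentialsValidateFailedError(Exception):
    """Local stand-in for the error type asserted in the hunk above."""


def validate_server_url(server_url: str) -> None:
    # Illustrative check only; the real validator contacts the server.
    if not server_url.startswith(("http://", "https://")):
        raise CredentialsValidateFailedError(f"bad server_url: {server_url!r}")


def test_validate_credentials_pattern():
    url = os.environ.get("OPENLLM_SERVER_URL", "http://localhost:3333")

    # Prefixing "ww" yields e.g. "wwhttp://...", so validation must raise.
    with pytest.raises(CredentialsValidateFailedError):
        validate_server_url("ww" + url)

    # The unmodified URL passes.
    validate_server_url(url)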
@@ -30,33 +30,28 @@ def test_invoke_model():
     model = OpenLLMTextEmbeddingModel()
 
     result = model.invoke(
-        model='NOT IMPORTANT',
+        model="NOT IMPORTANT",
         credentials={
-            'server_url': os.environ.get('OPENLLM_SERVER_URL'),
+            "server_url": os.environ.get("OPENLLM_SERVER_URL"),
         },
-        texts=[
-            "hello",
-            "world"
-        ],
-        user="abc-123"
+        texts=["hello", "world"],
+        user="abc-123",
     )
 
     assert isinstance(result, TextEmbeddingResult)
     assert len(result.embeddings) == 2
     assert result.usage.total_tokens > 0
 
 
 def test_get_num_tokens():
     model = OpenLLMTextEmbeddingModel()
 
     num_tokens = model.get_num_tokens(
-        model='NOT IMPORTANT',
+        model="NOT IMPORTANT",
         credentials={
-            'server_url': os.environ.get('OPENLLM_SERVER_URL'),
+            "server_url": os.environ.get("OPENLLM_SERVER_URL"),
         },
-        texts=[
-            "hello",
-            "world"
-        ]
+        texts=["hello", "world"],
     )
 
     assert num_tokens == 2
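The embedding assertions pin down the contract: two input texts yield two embedding vectors, and get_num_tokens returns exactly 2 for ["hello", "world"], i.e. one token per single-word text. A hedged sketch of an estimator consistent with that expectation, assuming counts are summed per text (whitespace splitting stands in for the provider's real tokenizer):

def estimate_num_tokens(texts: list[str]) -> int:
    # Naive stand-in: one token per whitespace-separated word, summed over texts.
    return sum(len(text.split()) for text in texts)


assert estimate_num_tokens(["hello", "world"]) == 2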
@@ -14,67 +14,61 @@ def test_validate_credentials_for_chat_model():
 
     with pytest.raises(CredentialsValidateFailedError):
         model.validate_credentials(
-            model='NOT IMPORTANT',
+            model="NOT IMPORTANT",
             credentials={
-                'server_url': 'invalid_key',
-            }
+                "server_url": "invalid_key",
+            },
         )
 
     model.validate_credentials(
-        model='NOT IMPORTANT',
+        model="NOT IMPORTANT",
         credentials={
-            'server_url': os.environ.get('OPENLLM_SERVER_URL'),
-        }
+            "server_url": os.environ.get("OPENLLM_SERVER_URL"),
+        },
     )
 
 
 def test_invoke_model():
     model = OpenLLMLargeLanguageModel()
 
     response = model.invoke(
-        model='NOT IMPORTANT',
+        model="NOT IMPORTANT",
         credentials={
-            'server_url': os.environ.get('OPENLLM_SERVER_URL'),
+            "server_url": os.environ.get("OPENLLM_SERVER_URL"),
         },
-        prompt_messages=[
-            UserPromptMessage(
-                content='Hello World!'
-            )
-        ],
+        prompt_messages=[UserPromptMessage(content="Hello World!")],
         model_parameters={
-            'temperature': 0.7,
-            'top_p': 1.0,
-            'top_k': 1,
+            "temperature": 0.7,
+            "top_p": 1.0,
+            "top_k": 1,
         },
-        stop=['you'],
+        stop=["you"],
         user="abc-123",
-        stream=False
+        stream=False,
     )
 
     assert isinstance(response, LLMResult)
     assert len(response.message.content) > 0
     assert response.usage.total_tokens > 0
 
 
 def test_invoke_stream_model():
     model = OpenLLMLargeLanguageModel()
 
     response = model.invoke(
-        model='NOT IMPORTANT',
+        model="NOT IMPORTANT",
         credentials={
-            'server_url': os.environ.get('OPENLLM_SERVER_URL'),
+            "server_url": os.environ.get("OPENLLM_SERVER_URL"),
         },
-        prompt_messages=[
-            UserPromptMessage(
-                content='Hello World!'
-            )
-        ],
+        prompt_messages=[UserPromptMessage(content="Hello World!")],
         model_parameters={
-            'temperature': 0.7,
-            'top_p': 1.0,
-            'top_k': 1,
+            "temperature": 0.7,
+            "top_p": 1.0,
+            "top_k": 1,
         },
-        stop=['you'],
+        stop=["you"],
         stream=True,
-        user="abc-123"
+        user="abc-123",
     )
 
     assert isinstance(response, Generator)
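With stream=True the invoke call returns a Generator, and the loop that drains it falls in the lines elided between this hunk and the next. A hedged sketch of how such a chunk stream is typically consumed, using the attribute names from the assertions in the following hunk (drain_stream is an illustrative helper, not the test's actual code):

from collections.abc import Generator


def drain_stream(response: Generator) -> None:
    for chunk in response:
        # Mid-stream deltas should carry content; the final chunk may be
        # empty apart from a finish_reason.
        if chunk.delta.finish_reason is None:
            assert len(chunk.delta.message.content) > 0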
@@ -84,21 +78,18 @@ def test_invoke_stream_model():
         assert isinstance(chunk.delta.message, AssistantPromptMessage)
         assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True
 
 
 def test_get_num_tokens():
     model = OpenLLMLargeLanguageModel()
 
     response = model.get_num_tokens(
-        model='NOT IMPORTANT',
+        model="NOT IMPORTANT",
         credentials={
-            'server_url': os.environ.get('OPENLLM_SERVER_URL'),
+            "server_url": os.environ.get("OPENLLM_SERVER_URL"),
         },
-        prompt_messages=[
-            UserPromptMessage(
-                content='Hello World!'
-            )
-        ],
-        tools=[]
+        prompt_messages=[UserPromptMessage(content="Hello World!")],
+        tools=[],
     )
 
     assert isinstance(response, int)
     assert response == 3
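All of these are integration tests: every call reads OPENLLM_SERVER_URL from the environment, so a reachable OpenLLM server is required. A hedged sketch of a local run; the server address and the test path are assumptions, not taken from this commit:

import os

import pytest

# Assumed address of a locally running OpenLLM server.
os.environ["OPENLLM_SERVER_URL"] = "http://localhost:3333"
# Assumed location of the OpenLLM runtime tests in the repository.
pytest.main(["api/tests/integration_tests/model_runtime/openllm/"])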