chore: apply ruff rules on tests and app.py (#3605)
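The hunks below all follow the same pattern: ruff's automated fixes rewrite `typing.List`/`typing.Dict`-style annotations as builtin generics, drop redundant `(object)` bases, move `Generator`/`Callable`/`Iterable` from `typing` to `collections.abc`, and turn backslash-continued or hanging-indent imports into sorted, parenthesized import blocks. A minimal sketch of the before/after shape of these rewrites follows; the rule codes in the comments are my best guess at the relevant ruff rules and are not recorded in the commit itself.

# Illustration only: the kind of rewrites ruff applies in the hunks below.
# Assumed rule families: UP035 (deprecated typing imports), UP006 (builtin
# generics), UP004 (useless object inheritance), I001 (import sorting).

# Before:
#   from typing import Generator, List
#   class MockModelClass(object):
#       def list(self) -> List[str]: ...

# After:
from collections.abc import Generator


class MockModelClass:
    def list(self) -> list[str]:
        # builtin generic instead of typing.List
        return ["gpt-3.5-turbo"]

    def stream(self) -> Generator[str, None, None]:
        # Generator now imported from collections.abc
        yield "chunk"

A command along the lines of `ruff check --fix` over the test tree and app.py would produce such changes in bulk; the exact invocation and configuration used for this commit are not shown here.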
@@ -1,22 +1,32 @@
import os
from collections.abc import Iterable
from time import sleep
from typing import Any, Literal, Union, Iterable

from anthropic.resources import Messages
from anthropic.types.message_delta_event import Delta
from typing import Any, Literal, Union

import anthropic
import pytest
from _pytest.monkeypatch import MonkeyPatch
from anthropic import Anthropic, Stream
from anthropic.types import MessageParam, Message, MessageStreamEvent, \
ContentBlock, MessageStartEvent, Usage, TextDelta, MessageDeltaEvent, MessageStopEvent, ContentBlockDeltaEvent, \
MessageDeltaUsage
from anthropic.resources import Messages
from anthropic.types import (
ContentBlock,
ContentBlockDeltaEvent,
Message,
MessageDeltaEvent,
MessageDeltaUsage,
MessageParam,
MessageStartEvent,
MessageStopEvent,
MessageStreamEvent,
TextDelta,
Usage,
)
from anthropic.types.message_delta_event import Delta

MOCK = os.getenv('MOCK_SWITCH', 'false') == 'true'

class MockAnthropicClass(object):
class MockAnthropicClass:
@staticmethod
def mocked_anthropic_chat_create_sync(model: str) -> Message:
return Message(

@@ -1,4 +1,4 @@
from typing import Generator, List
from collections.abc import Generator

import google.generativeai.types.content_types as content_types
import google.generativeai.types.generation_types as generation_config_types
@@ -6,15 +6,15 @@ import google.generativeai.types.safety_types as safety_types
import pytest
from _pytest.monkeypatch import MonkeyPatch
from google.ai import generativelanguage as glm
from google.ai.generativelanguage_v1beta.types import content as gag_content
from google.generativeai import GenerativeModel
from google.generativeai.client import _ClientManager, configure
from google.generativeai.types import GenerateContentResponse
from google.generativeai.types.generation_types import BaseGenerateContentResponse
from google.ai.generativelanguage_v1beta.types import content as gag_content

current_api_key = ''

class MockGoogleResponseClass(object):
class MockGoogleResponseClass:
_done = False

def __iter__(self):
@@ -41,7 +41,7 @@ class MockGoogleResponseClass(object):
chunks=[]
)

class MockGoogleResponseCandidateClass(object):
class MockGoogleResponseCandidateClass:
finish_reason = 'stop'

@property
@@ -52,7 +52,7 @@ class MockGoogleResponseCandidateClass(object):
]
)

class MockGoogleClass(object):
class MockGoogleClass:
@staticmethod
def generate_content_sync() -> GenerateContentResponse:
return GenerateContentResponse(
@@ -91,7 +91,7 @@ class MockGoogleClass(object):
return 'it\'s google!'

@property
def generative_response_candidates(self) -> List[MockGoogleResponseCandidateClass]:
def generative_response_candidates(self) -> list[MockGoogleResponseCandidateClass]:
return [MockGoogleResponseCandidateClass()]

def make_client(self: _ClientManager, name: str):

@@ -1,9 +1,9 @@
import os
from typing import Any, Dict, List

import pytest
from _pytest.monkeypatch import MonkeyPatch
from huggingface_hub import InferenceClient

from tests.integration_tests.model_runtime.__mock.huggingface_chat import MockHuggingfaceChatClass

MOCK = os.getenv('MOCK_SWITCH', 'false').lower() == 'true'

@@ -1,14 +1,20 @@
import re
from typing import Any, Generator, List, Literal, Optional, Union
from collections.abc import Generator
from typing import Any, Literal, Optional, Union

from _pytest.monkeypatch import MonkeyPatch
from huggingface_hub import InferenceClient
from huggingface_hub.inference._text_generation import (Details, StreamDetails, TextGenerationResponse,
TextGenerationStreamResponse, Token)
from huggingface_hub.inference._text_generation import (
Details,
StreamDetails,
TextGenerationResponse,
TextGenerationStreamResponse,
Token,
)
from huggingface_hub.utils import BadRequestError

class MockHuggingfaceChatClass(object):
class MockHuggingfaceChatClass:
@staticmethod
def generate_create_sync(model: str) -> TextGenerationResponse:
response = TextGenerationResponse(

@@ -1,7 +1,9 @@
import os
from typing import Callable, List, Literal
from collections.abc import Callable
from typing import Literal

import pytest

# import monkeypatch
from _pytest.monkeypatch import MonkeyPatch
from openai.resources.audio.transcriptions import Transcriptions
@@ -10,6 +12,7 @@ from openai.resources.completions import Completions
from openai.resources.embeddings import Embeddings
from openai.resources.models import Models
from openai.resources.moderations import Moderations

from tests.integration_tests.model_runtime.__mock.openai_chat import MockChatClass
from tests.integration_tests.model_runtime.__mock.openai_completion import MockCompletionsClass
from tests.integration_tests.model_runtime.__mock.openai_embeddings import MockEmbeddingsClass
@@ -18,7 +21,7 @@ from tests.integration_tests.model_runtime.__mock.openai_remote import MockModel
from tests.integration_tests.model_runtime.__mock.openai_speech2text import MockSpeech2TextClass

def mock_openai(monkeypatch: MonkeyPatch, methods: List[Literal["completion", "chat", "remote", "moderation", "speech2text", "text_embedding"]]) -> Callable[[], None]:
def mock_openai(monkeypatch: MonkeyPatch, methods: list[Literal["completion", "chat", "remote", "moderation", "speech2text", "text_embedding"]]) -> Callable[[], None]:
"""
mock openai module

@@ -1,31 +1,44 @@
import re
from collections.abc import Generator
from json import dumps, loads
from time import sleep, time

# import monkeypatch
from typing import Any, Generator, List, Literal, Optional, Union
from typing import Any, Literal, Optional, Union

import openai.types.chat.completion_create_params as completion_create_params
from core.model_runtime.errors.invoke import InvokeAuthorizationError
from openai import AzureOpenAI, OpenAI
from openai._types import NOT_GIVEN, NotGiven
from openai.resources.chat.completions import Completions
from openai.types import Completion as CompletionMessage
from openai.types.chat import (ChatCompletion, ChatCompletionChunk, ChatCompletionMessageParam,
ChatCompletionMessageToolCall, ChatCompletionToolChoiceOptionParam,
ChatCompletionToolParam)
from openai.types.chat import (
ChatCompletion,
ChatCompletionChunk,
ChatCompletionMessageParam,
ChatCompletionMessageToolCall,
ChatCompletionToolChoiceOptionParam,
ChatCompletionToolParam,
)
from openai.types.chat.chat_completion import ChatCompletion as _ChatCompletion
from openai.types.chat.chat_completion import Choice as _ChatCompletionChoice
from openai.types.chat.chat_completion_chunk import (Choice, ChoiceDelta, ChoiceDeltaFunctionCall, ChoiceDeltaToolCall,
ChoiceDeltaToolCallFunction)
from openai.types.chat.chat_completion_chunk import (
Choice,
ChoiceDelta,
ChoiceDeltaFunctionCall,
ChoiceDeltaToolCall,
ChoiceDeltaToolCallFunction,
)
from openai.types.chat.chat_completion_message import ChatCompletionMessage, FunctionCall
from openai.types.chat.chat_completion_message_tool_call import Function
from openai.types.completion_usage import CompletionUsage

from core.model_runtime.errors.invoke import InvokeAuthorizationError

class MockChatClass(object):

class MockChatClass:
@staticmethod
def generate_function_call(
functions: List[completion_create_params.Function] | NotGiven = NOT_GIVEN,
functions: list[completion_create_params.Function] | NotGiven = NOT_GIVEN,
) -> Optional[FunctionCall]:
if not functions or len(functions) == 0:
return None
@@ -61,8 +74,8 @@ class MockChatClass(object):

@staticmethod
def generate_tool_calls(
tools: List[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
) -> Optional[List[ChatCompletionMessageToolCall]]:
tools: list[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
) -> Optional[list[ChatCompletionMessageToolCall]]:
list_tool_calls = []
if not tools or len(tools) == 0:
return None
@@ -91,8 +104,8 @@ class MockChatClass(object):
@staticmethod
def mocked_openai_chat_create_sync(
model: str,
functions: List[completion_create_params.Function] | NotGiven = NOT_GIVEN,
tools: List[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
functions: list[completion_create_params.Function] | NotGiven = NOT_GIVEN,
tools: list[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
) -> CompletionMessage:
tool_calls = []
function_call = MockChatClass.generate_function_call(functions=functions)
@@ -128,8 +141,8 @@ class MockChatClass(object):
@staticmethod
def mocked_openai_chat_create_stream(
model: str,
functions: List[completion_create_params.Function] | NotGiven = NOT_GIVEN,
tools: List[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
functions: list[completion_create_params.Function] | NotGiven = NOT_GIVEN,
tools: list[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
) -> Generator[ChatCompletionChunk, None, None]:
tool_calls = []
function_call = MockChatClass.generate_function_call(functions=functions)
@@ -197,17 +210,17 @@ class MockChatClass(object):
)

def chat_create(self: Completions, *,
messages: List[ChatCompletionMessageParam],
messages: list[ChatCompletionMessageParam],
model: Union[str,Literal[
"gpt-4-1106-preview", "gpt-4-vision-preview", "gpt-4", "gpt-4-0314", "gpt-4-0613",
"gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613",
"gpt-3.5-turbo-1106", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-0301",
"gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613"],
],
functions: List[completion_create_params.Function] | NotGiven = NOT_GIVEN,
functions: list[completion_create_params.Function] | NotGiven = NOT_GIVEN,
response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
tools: List[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
tools: list[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
**kwargs: Any,
):
openai_models = [

@@ -1,9 +1,10 @@
import re
from collections.abc import Generator
from time import sleep, time
# import monkeypatch
from typing import Any, Generator, List, Literal, Optional, Union

from core.model_runtime.errors.invoke import InvokeAuthorizationError
# import monkeypatch
from typing import Any, Literal, Optional, Union

from openai import AzureOpenAI, BadRequestError, OpenAI
from openai._types import NOT_GIVEN, NotGiven
from openai.resources.completions import Completions
@@ -11,8 +12,10 @@ from openai.types import Completion as CompletionMessage
from openai.types.completion import CompletionChoice
from openai.types.completion_usage import CompletionUsage

from core.model_runtime.errors.invoke import InvokeAuthorizationError

class MockCompletionsClass(object):

class MockCompletionsClass:
@staticmethod
def mocked_openai_completion_create_sync(
model: str
@@ -90,7 +93,7 @@ class MockCompletionsClass(object):
"code-davinci-002", "text-curie-001", "text-babbage-001",
"text-ada-001"],
],
prompt: Union[str, List[str], List[int], List[List[int]], None],
prompt: Union[str, list[str], list[int], list[list[int]], None],
stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
**kwargs: Any
):

@@ -1,18 +1,19 @@
import re
from typing import Any, List, Literal, Union
from typing import Any, Literal, Union

from core.model_runtime.errors.invoke import InvokeAuthorizationError
from openai import OpenAI
from openai._types import NOT_GIVEN, NotGiven
from openai.resources.embeddings import Embeddings
from openai.types.create_embedding_response import CreateEmbeddingResponse, Usage
from openai.types.embedding import Embedding

from core.model_runtime.errors.invoke import InvokeAuthorizationError

class MockEmbeddingsClass(object):

class MockEmbeddingsClass:
def create_embeddings(
self: Embeddings, *,
input: Union[str, List[str], List[int], List[List[int]]],
input: Union[str, list[str], list[int], list[list[int]]],
model: Union[str, Literal["text-embedding-ada-002"]],
encoding_format: Literal["float", "base64"] | NotGiven = NOT_GIVEN,
**kwargs: Any

@@ -1,16 +1,17 @@
import re
from typing import Any, List, Literal, Union
from typing import Any, Literal, Union

from core.model_runtime.errors.invoke import InvokeAuthorizationError
from openai._types import NOT_GIVEN, NotGiven
from openai.resources.moderations import Moderations
from openai.types import ModerationCreateResponse
from openai.types.moderation import Categories, CategoryScores, Moderation

from core.model_runtime.errors.invoke import InvokeAuthorizationError

class MockModerationClass(object):

class MockModerationClass:
def moderation_create(self: Moderations,*,
input: Union[str, List[str]],
input: Union[str, list[str]],
model: Union[str, Literal["text-moderation-latest", "text-moderation-stable"]] | NotGiven = NOT_GIVEN,
**kwargs: Any
) -> ModerationCreateResponse:

@@ -1,18 +1,17 @@
from time import time
from typing import List

from openai.resources.models import Models
from openai.types.model import Model

class MockModelClass(object):
class MockModelClass:
"""
mock class for openai.models.Models
"""
def list(
self,
**kwargs,
) -> List[Model]:
) -> list[Model]:
return [
Model(
id='ft:gpt-3.5-turbo-0613:personal::8GYJLPDQ',

@@ -1,13 +1,14 @@
import re
from typing import Any, List, Literal, Union
from typing import Any, Literal, Union

from core.model_runtime.errors.invoke import InvokeAuthorizationError
from openai._types import NOT_GIVEN, FileTypes, NotGiven
from openai.resources.audio.transcriptions import Transcriptions
from openai.types.audio.transcription import Transcription

from core.model_runtime.errors.invoke import InvokeAuthorizationError

class MockSpeech2TextClass(object):

class MockSpeech2TextClass:
def speech2text_create(self: Transcriptions,
*,
file: FileTypes,

@@ -1,19 +1,24 @@
import os
import re
from typing import List, Union
from typing import Union

import pytest
from _pytest.monkeypatch import MonkeyPatch
from requests import Response
from requests.exceptions import ConnectionError
from requests.sessions import Session
from xinference_client.client.restful.restful_client import (Client, RESTfulChatglmCppChatModelHandle,
RESTfulChatModelHandle, RESTfulEmbeddingModelHandle,
RESTfulGenerateModelHandle, RESTfulRerankModelHandle)
from xinference_client.client.restful.restful_client import (
Client,
RESTfulChatglmCppChatModelHandle,
RESTfulChatModelHandle,
RESTfulEmbeddingModelHandle,
RESTfulGenerateModelHandle,
RESTfulRerankModelHandle,
)
from xinference_client.types import Embedding, EmbeddingData, EmbeddingUsage

class MockXinferenceClass(object):
class MockXinferenceClass:
def get_chat_model(self: Client, model_uid: str) -> Union[RESTfulChatglmCppChatModelHandle, RESTfulGenerateModelHandle, RESTfulChatModelHandle]:
if not re.match(r'https?:\/\/[^\s\/$.?#].[^\s]*$', self.base_url):
raise RuntimeError('404 Not Found')
@@ -101,7 +106,7 @@ class MockXinferenceClass(object):
def _check_cluster_authenticated(self):
self._cluster_authed = True

def rerank(self: RESTfulRerankModelHandle, documents: List[str], query: str, top_n: int) -> dict:
def rerank(self: RESTfulRerankModelHandle, documents: list[str], query: str, top_n: int) -> dict:
# check if self._model_uid is a valid uuid
if not re.match(r'[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}', self._model_uid) and \
self._model_uid != 'rerank':
@@ -126,7 +131,7 @@ class MockXinferenceClass(object):

def create_embedding(
self: RESTfulGenerateModelHandle,
input: Union[str, List[str]],
input: Union[str, list[str]],
**kwargs
) -> dict:
# check if self._model_uid is a valid uuid

@@ -1,7 +1,8 @@
import os
from typing import Generator
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import AssistantPromptMessage, SystemPromptMessage, UserPromptMessage
from core.model_runtime.errors.validate import CredentialsValidateFailedError

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.anthropic.anthropic import AnthropicProvider
from tests.integration_tests.model_runtime.__mock.anthropic import setup_anthropic_mock

@@ -1,11 +1,17 @@
import os
from typing import Generator
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (AssistantPromptMessage, ImagePromptMessageContent,
PromptMessageTool, SystemPromptMessage,
TextPromptMessageContent, UserPromptMessage)
from core.model_runtime.entities.message_entities import (
AssistantPromptMessage,
ImagePromptMessageContent,
PromptMessageTool,
SystemPromptMessage,
TextPromptMessageContent,
UserPromptMessage,
)
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.azure_openai.llm.llm import AzureOpenAILargeLanguageModel
from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.azure_openai.text_embedding.text_embedding import AzureOpenAITextEmbeddingModel

@@ -1,8 +1,9 @@
import os
from collections.abc import Generator
from time import sleep
from typing import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import AssistantPromptMessage, SystemPromptMessage, UserPromptMessage
from core.model_runtime.entities.model_entities import AIModelEntity

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.baichuan.baichuan import BaichuanProvider

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.baichuan.text_embedding.text_embedding import BaichuanTextEmbeddingModel

@@ -1,7 +1,8 @@
import os
from typing import Generator
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import AssistantPromptMessage, SystemPromptMessage, UserPromptMessage
from core.model_runtime.errors.validate import CredentialsValidateFailedError

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.bedrock.bedrock import BedrockProvider

@@ -1,11 +1,16 @@
import os
from typing import Generator
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (AssistantPromptMessage, PromptMessageTool,
SystemPromptMessage, TextPromptMessageContent,
UserPromptMessage)
from core.model_runtime.entities.message_entities import (
AssistantPromptMessage,
PromptMessageTool,
SystemPromptMessage,
TextPromptMessageContent,
UserPromptMessage,
)
from core.model_runtime.entities.model_entities import AIModelEntity
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.chatglm.llm.llm import ChatGLMLargeLanguageModel

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.chatglm.chatglm import ChatGLMProvider
from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock

@@ -1,7 +1,8 @@
import os
from typing import Generator
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import AssistantPromptMessage, SystemPromptMessage, UserPromptMessage
from core.model_runtime.errors.validate import CredentialsValidateFailedError

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.cohere.cohere import CohereProvider

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.entities.rerank_entities import RerankResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.cohere.rerank.rerank import CohereRerankModel

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.cohere.text_embedding.text_embedding import CohereTextEmbeddingModel

@@ -1,11 +1,16 @@
import os
from typing import Generator
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (AssistantPromptMessage, ImagePromptMessageContent,
SystemPromptMessage, TextPromptMessageContent,
UserPromptMessage)
from core.model_runtime.entities.message_entities import (
AssistantPromptMessage,
ImagePromptMessageContent,
SystemPromptMessage,
TextPromptMessageContent,
UserPromptMessage,
)
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.google.llm.llm import GoogleLargeLanguageModel
from tests.integration_tests.model_runtime.__mock.google import setup_google_mock

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.google.google import GoogleProvider
from tests.integration_tests.model_runtime.__mock.google import setup_google_mock

@@ -1,7 +1,8 @@
import os
from typing import Generator
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import AssistantPromptMessage, UserPromptMessage
from core.model_runtime.errors.validate import CredentialsValidateFailedError

@@ -1,10 +1,12 @@
import os

import pytest

from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.huggingface_hub.text_embedding.text_embedding import \
HuggingfaceHubTextEmbeddingModel
from core.model_runtime.model_providers.huggingface_hub.text_embedding.text_embedding import (
HuggingfaceHubTextEmbeddingModel,
)

def test_hosted_inference_api_validate_credentials():

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.jina.jina import JinaProvider

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.jina.text_embedding.text_embedding import JinaTextEmbeddingModel

@@ -1,11 +1,16 @@
import os
from typing import Generator
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (AssistantPromptMessage, PromptMessageTool,
SystemPromptMessage, TextPromptMessageContent,
UserPromptMessage)
from core.model_runtime.entities.message_entities import (
AssistantPromptMessage,
PromptMessageTool,
SystemPromptMessage,
TextPromptMessageContent,
UserPromptMessage,
)
from core.model_runtime.entities.model_entities import ParameterRule
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.localai.llm.llm import LocalAILarguageModel

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.minimax.text_embedding.text_embedding import MinimaxTextEmbeddingModel

@@ -1,8 +1,9 @@
import os
from collections.abc import Generator
from time import sleep
from typing import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import AssistantPromptMessage, UserPromptMessage
from core.model_runtime.entities.model_entities import AIModelEntity

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.minimax.minimax import MinimaxProvider

@@ -1,11 +1,16 @@
import os
from typing import Generator
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (AssistantPromptMessage, ImagePromptMessageContent,
SystemPromptMessage, TextPromptMessageContent,
UserPromptMessage)
from core.model_runtime.entities.message_entities import (
AssistantPromptMessage,
ImagePromptMessageContent,
SystemPromptMessage,
TextPromptMessageContent,
UserPromptMessage,
)
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.ollama.llm.llm import OllamaLargeLanguageModel

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.ollama.text_embedding.text_embedding import OllamaEmbeddingModel

@@ -1,11 +1,17 @@
import os
from typing import Generator
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (AssistantPromptMessage, ImagePromptMessageContent,
PromptMessageTool, SystemPromptMessage,
TextPromptMessageContent, UserPromptMessage)
from core.model_runtime.entities.message_entities import (
AssistantPromptMessage,
ImagePromptMessageContent,
PromptMessageTool,
SystemPromptMessage,
TextPromptMessageContent,
UserPromptMessage,
)
from core.model_runtime.entities.model_entities import AIModelEntity, ModelType
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.openai.moderation.moderation import OpenAIModerationModel
from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.openai.openai import OpenAIProvider
from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.openai.speech2text.speech2text import OpenAISpeech2TextModel
from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.openai.text_embedding.text_embedding import OpenAITextEmbeddingModel

@@ -1,10 +1,15 @@
import os
from typing import Generator
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (AssistantPromptMessage, PromptMessageTool,
SystemPromptMessage, UserPromptMessage)
from core.model_runtime.entities.message_entities import (
AssistantPromptMessage,
PromptMessageTool,
SystemPromptMessage,
UserPromptMessage,
)
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.openai_api_compatible.llm.llm import OAIAPICompatLargeLanguageModel

@@ -1,10 +1,12 @@
import os

import pytest

from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.openai_api_compatible.text_embedding.text_embedding import \
OAICompatEmbeddingModel
from core.model_runtime.model_providers.openai_api_compatible.text_embedding.text_embedding import (
OAICompatEmbeddingModel,
)

"""
Using OpenAI's API as testing endpoint

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.openllm.text_embedding.text_embedding import OpenLLMTextEmbeddingModel

@@ -1,7 +1,8 @@
import os
from typing import Generator
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import AssistantPromptMessage, UserPromptMessage
from core.model_runtime.errors.validate import CredentialsValidateFailedError

@@ -1,10 +1,15 @@
import os
from typing import Generator
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (AssistantPromptMessage, PromptMessageTool,
SystemPromptMessage, UserPromptMessage)
from core.model_runtime.entities.message_entities import (
AssistantPromptMessage,
PromptMessageTool,
SystemPromptMessage,
UserPromptMessage,
)
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.openrouter.llm.llm import OpenRouterLargeLanguageModel

@@ -1,7 +1,8 @@
import os
from typing import Generator
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import AssistantPromptMessage, SystemPromptMessage, UserPromptMessage
from core.model_runtime.errors.validate import CredentialsValidateFailedError

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.replicate.text_embedding.text_embedding import ReplicateEmbeddingModel

@@ -1,7 +1,8 @@
import os
from typing import Generator
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import AssistantPromptMessage, SystemPromptMessage, UserPromptMessage
from core.model_runtime.errors.validate import CredentialsValidateFailedError

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.spark.spark import SparkProvider

@@ -1,10 +1,15 @@
import os
from typing import Generator
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (AssistantPromptMessage, PromptMessageTool,
SystemPromptMessage, UserPromptMessage)
from core.model_runtime.entities.message_entities import (
AssistantPromptMessage,
PromptMessageTool,
SystemPromptMessage,
UserPromptMessage,
)
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.togetherai.llm.llm import TogetherAILargeLanguageModel

@@ -1,7 +1,8 @@
import os
from typing import Generator
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import AssistantPromptMessage, SystemPromptMessage, UserPromptMessage
from core.model_runtime.errors.validate import CredentialsValidateFailedError

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.tongyi.tongyi import TongyiProvider

@@ -1,8 +1,9 @@
import os
from collections.abc import Generator
from time import sleep
from typing import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import AssistantPromptMessage, SystemPromptMessage, UserPromptMessage
from core.model_runtime.entities.model_entities import AIModelEntity

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.wenxin.wenxin import WenxinProvider

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.xinference.text_embedding.text_embedding import XinferenceTextEmbeddingModel

@@ -1,11 +1,16 @@
import os
from typing import Generator
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (AssistantPromptMessage, PromptMessageTool,
SystemPromptMessage, TextPromptMessageContent,
UserPromptMessage)
from core.model_runtime.entities.message_entities import (
AssistantPromptMessage,
PromptMessageTool,
SystemPromptMessage,
TextPromptMessageContent,
UserPromptMessage,
)
from core.model_runtime.entities.model_entities import AIModelEntity
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.xinference.llm.llm import XinferenceAILargeLanguageModel

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.entities.rerank_entities import RerankResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.xinference.rerank.rerank import XinferenceRerankModel

@@ -1,10 +1,15 @@
import os
from typing import Generator
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (AssistantPromptMessage, PromptMessageTool,
SystemPromptMessage, UserPromptMessage)
from core.model_runtime.entities.message_entities import (
AssistantPromptMessage,
PromptMessageTool,
SystemPromptMessage,
UserPromptMessage,
)
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.zhipuai.llm.llm import ZhipuAILargeLanguageModel

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.zhipuai.zhipuai import ZhipuaiProvider

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.zhipuai.text_embedding.text_embedding import ZhipuAITextEmbeddingModel