feat: re-add prompt messages to result and chunks in llm (#17883)

Signed-off-by: -LAN- <laipz8200@outlook.com>

Author:  -LAN-
Date:    2025-04-11 18:04:49 +09:00 (committed by GitHub)
Parent:  5f8d20b5b2
Commit:  8e6f6d64a4

5 changed files with 24 additions and 15 deletions


@@ -45,7 +45,7 @@ class LargeLanguageModel(AIModel):
         stream: bool = True,
         user: Optional[str] = None,
         callbacks: Optional[list[Callback]] = None,
-    ) -> Union[LLMResult, Generator]:
+    ) -> Union[LLMResult, Generator[LLMResultChunk, None, None]]:
         """
         Invoke large language model
 
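Note on the return-type change above: parameterizing the Generator tells static type checkers exactly what each streamed element is. A minimal sketch of the effect, using hypothetical stand-in classes rather than the real Dify types:

    from collections.abc import Generator
    from typing import Union

    class LLMResult:  # hypothetical stand-in for the real class
        pass

    class LLMResultChunk:  # hypothetical stand-in for the real class
        pass

    def invoke(stream: bool) -> Union[LLMResult, Generator[LLMResultChunk, None, None]]:
        if stream:
            def _gen() -> Generator[LLMResultChunk, None, None]:
                yield LLMResultChunk()
            return _gen()
        return LLMResult()

    result = invoke(stream=True)
    if isinstance(result, LLMResult):
        pass  # blocking path: a single result object
    else:
        for chunk in result:
            # With the parameterized annotation, checkers infer
            # chunk: LLMResultChunk instead of Any.
            print(type(chunk).__name__)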
@@ -205,22 +205,26 @@ class LargeLanguageModel(AIModel):
                 user=user,
                 callbacks=callbacks,
             )
         elif isinstance(result, LLMResult):
-            return result
+            # Following https://github.com/langgenius/dify/issues/17799,
+            # we removed the prompt_messages from the chunk on the plugin daemon side.
+            # To ensure compatibility, we add the prompt_messages back here.
+            result.prompt_messages = prompt_messages
+            return result
         raise NotImplementedError("unsupported invoke result type", type(result))
 
     def _invoke_result_generator(
         self,
         model: str,
         result: Generator,
         credentials: dict,
-        prompt_messages: list[PromptMessage],
+        prompt_messages: Sequence[PromptMessage],
         model_parameters: dict,
         tools: Optional[list[PromptMessageTool]] = None,
         stop: Optional[Sequence[str]] = None,
         stream: bool = True,
         user: Optional[str] = None,
         callbacks: Optional[list[Callback]] = None,
-    ) -> Generator:
+    ) -> Generator[LLMResultChunk, None, None]:
         """
         Invoke result generator
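Why the blocking-path assignment above matters: per issue #17799 the plugin daemon no longer echoes prompt_messages back, so a returned LLMResult would otherwise carry an empty prompt list while downstream readers (callbacks, logging) still expect it. A hedged sketch of the failure mode and the shim, with simplified stand-in types:

    from dataclasses import dataclass, field

    @dataclass
    class PromptMessage:  # simplified stand-in for the real class
        content: str

    @dataclass
    class LLMResult:  # simplified stand-in for the real class
        answer: str
        prompt_messages: list[PromptMessage] = field(default_factory=list)

    def result_from_daemon() -> LLMResult:
        # Post-#17799 behaviour: the daemon omits the prompts.
        return LLMResult(answer="Hi there")

    prompt_messages = [PromptMessage(content="Hello?")]
    result = result_from_daemon()
    result.prompt_messages = list(prompt_messages)  # the compatibility re-add
    assert result.prompt_messages == prompt_messages  # downstream readers still work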
@@ -235,6 +239,10 @@ class LargeLanguageModel(AIModel):
         try:
             for chunk in result:
+                # Following https://github.com/langgenius/dify/issues/17799,
+                # we removed the prompt_messages from the chunk on the plugin daemon side.
+                # To ensure compatibility, we add the prompt_messages back here.
+                chunk.prompt_messages = prompt_messages
                 yield chunk
 
                 self._trigger_new_chunk_callbacks(
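The streaming path applies the same shim once per chunk, so every chunk a consumer iterates already has its prompts restored. A self-contained sketch of the pattern (plain dicts stand in for LLMResultChunk objects):

    from collections.abc import Generator, Sequence

    def patch_chunks(
        result: Generator[dict, None, None],
        prompt_messages: Sequence[str],
    ) -> Generator[dict, None, None]:
        for chunk in result:
            # Restore the prompts on every chunk before consumers see it,
            # mirroring the loop in _invoke_result_generator above.
            chunk["prompt_messages"] = list(prompt_messages)
            yield chunk

    raw = ({"delta": piece} for piece in ("Hel", "lo"))
    for chunk in patch_chunks(raw, prompt_messages=["Hello?"]):
        assert chunk["prompt_messages"] == ["Hello?"]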
@@ -403,7 +411,7 @@ class LargeLanguageModel(AIModel):
         chunk: LLMResultChunk,
         model: str,
         credentials: dict,
-        prompt_messages: list[PromptMessage],
+        prompt_messages: Sequence[PromptMessage],
         model_parameters: dict,
         tools: Optional[list[PromptMessageTool]] = None,
         stop: Optional[Sequence[str]] = None,
@@ -450,7 +458,7 @@ class LargeLanguageModel(AIModel):
         model: str,
         result: LLMResult,
         credentials: dict,
-        prompt_messages: list[PromptMessage],
+        prompt_messages: Sequence[PromptMessage],
         model_parameters: dict,
         tools: Optional[list[PromptMessageTool]] = None,
         stop: Optional[Sequence[str]] = None,
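The repeated list[PromptMessage] → Sequence[PromptMessage] loosening is a standard typing fix: Sequence is read-only and covariant, so callers may pass a list, a tuple, or a list of a PromptMessage subclass without a type error, and the signatures now document that the callee will not mutate the prompts. A small illustration with simplified stand-in types:

    from collections.abc import Sequence

    class PromptMessage:  # simplified stand-in
        pass

    class UserPromptMessage(PromptMessage):  # simplified stand-in
        pass

    def takes_list(messages: list[PromptMessage]) -> int:
        return len(messages)

    def takes_sequence(messages: Sequence[PromptMessage]) -> int:
        return len(messages)

    user_messages: list[UserPromptMessage] = [UserPromptMessage()]
    takes_sequence(user_messages)            # OK: Sequence[T] is covariant in T
    takes_sequence((UserPromptMessage(),))   # OK: tuples are Sequences too
    # takes_list(user_messages)  # flagged by type checkers: list is invariant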