fix: next suggested question logic problem (#6451)

Co-authored-by: evenyan <yikun.yan@ubtrobot.com>
This commit is contained in:
Even
2024-07-19 20:26:11 +08:00
committed by GitHub
parent 48f872a68c
commit c013086e64
2 changed files with 2 additions and 1 deletion

View File

@@ -103,7 +103,7 @@ class TokenBufferMemory:
if curr_message_tokens > max_token_limit:
pruned_memory = []
while curr_message_tokens > max_token_limit and prompt_messages:
while curr_message_tokens > max_token_limit and len(prompt_messages)>1:
pruned_memory.append(prompt_messages.pop(0))
curr_message_tokens = self.model_instance.get_llm_num_tokens(
prompt_messages