Mirror of http://112.124.100.131/huang.ze/ebiz-dify-ai.git (synced 2025-12-10 03:16:51 +08:00)
Fix variable typo (#8084)
@@ -29,7 +29,7 @@ credentials_for_provider:
       en_US: Please input your OpenAI API key
       zh_Hans: 请输入你的 OpenAI API key
       pt_BR: Please input your OpenAI API key
-  openai_organizaion_id:
+  openai_organization_id:
     type: text-input
     required: false
     label:
@@ -16,7 +16,7 @@ class DallE2Tool(BuiltinTool):
         """
         invoke tools
         """
-        openai_organization = self.runtime.credentials.get('openai_organizaion_id', None)
+        openai_organization = self.runtime.credentials.get('openai_organization_id', None)
         if not openai_organization:
             openai_organization = None
         openai_base_url = self.runtime.credentials.get('openai_base_url', None)
@@ -17,7 +17,7 @@ class DallE3Tool(BuiltinTool):
         """
         invoke tools
         """
-        openai_organization = self.runtime.credentials.get('openai_organizaion_id', None)
+        openai_organization = self.runtime.credentials.get('openai_organization_id', None)
         if not openai_organization:
             openai_organization = None
         openai_base_url = self.runtime.credentials.get('openai_base_url', None)
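Both DALL·E tools read the credential key verbatim, which is why the YAML rename and the two `credentials.get(...)` fixes above have to land together. As a rough sketch of where these values typically end up — the client wiring here is an assumption, since this diff only shows the credential reads (it assumes the openai>=1.x SDK):

from openai import OpenAI

def build_client(credentials: dict) -> OpenAI:
    # Optional fields are read with .get(), so missing keys fall back to None
    organization = credentials.get('openai_organization_id') or None
    base_url = credentials.get('openai_base_url') or None
    return OpenAI(
        api_key=credentials['openai_api_key'],  # required credential from the YAML above
        organization=organization,              # optional; None lets the SDK use its default
        base_url=base_url,                      # optional custom endpoint
    )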
@@ -4,7 +4,7 @@ from core.tools.errors import ToolProviderCredentialValidationError
 from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController


-class GihubProvider(BuiltinToolProviderController):
+class GithubProvider(BuiltinToolProviderController):
     def _validate_credentials(self, credentials: dict) -> None:
         try:
             if 'access_tokens' not in credentials or not credentials.get('access_tokens'):
@@ -9,7 +9,7 @@ from core.tools.entities.tool_entities import ToolInvokeMessage
 from core.tools.tool.builtin_tool import BuiltinTool


-class GihubRepositoriesTool(BuiltinTool):
+class GithubRepositoriesTool(BuiltinTool):
     def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
         """
         invoke tools
@@ -60,7 +60,7 @@ class GitlabCommitsTool(BuiltinTool):
             project_name = project['name']
             print(f"Project: {project_name}")

-            # Get all of proejct commits
+            # Get all of project commits
             commits_url = f"{domain}/api/v4/projects/{project_id}/repository/commits"
             params = {
                 'since': start_time,
@@ -83,7 +83,7 @@ class GitlabCommitsTool(BuiltinTool):
                 diffs = diff_response.json()

                 for diff in diffs:
-                    # Caculate code lines of changed
+                    # Calculate code lines of changed
                     added_lines = diff['diff'].count('\n+')
                     removed_lines = diff['diff'].count('\n-')
                     total_changes = added_lines + removed_lines
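The counting idiom in this hunk treats a unified-diff body as plain text: added lines start with '+' after a newline, removed lines with '-'. A self-contained sketch of the same heuristic (the input string shape mirrors the 'diff' field of GitLab's commit-diff API):

def count_diff_lines(diff_text: str) -> tuple[int, int]:
    # Hunk headers ('@@ ...') are not preceded by '\n+' or '\n-',
    # so they are not counted, matching the tool's simple heuristic.
    added = diff_text.count('\n+')
    removed = diff_text.count('\n-')
    return added, removed

added, removed = count_diff_lines("@@ -1,2 +1,2 @@\n-old line\n+new line\n context\n")
print(added, removed)  # 1 1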
@@ -6,7 +6,7 @@ identity:
     zh_Hans: GitLab 提交内容查询
 description:
   human:
-    en_US: A tool for query GitLab commits, Input should be a exists username or projec.
+    en_US: A tool for query GitLab commits, Input should be a exists username or project.
     zh_Hans: 一个用于查询 GitLab 代码提交内容的工具，输入的内容应该是一个已存在的用户名或者项目名。
   llm: A tool for query GitLab commits, Input should be a exists username or project.
 parameters:
@@ -29,7 +29,7 @@ class OpenweatherTool(BuiltinTool):
         # request URL
         url = "https://api.openweathermap.org/data/2.5/weather"

-        # request parmas
+        # request params
         params = {
             "q": city,
             "appid": self.runtime.credentials.get("api_key"),
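For reference, the request this tool builds is a plain GET against OpenWeather's current-weather endpoint. A minimal standalone sketch, assuming the `requests` library and the API's documented `q`/`appid`/`units` parameters (the default `units` value is an assumption):

import requests

def fetch_weather(city: str, api_key: str, units: str = "metric") -> dict:
    # Same endpoint and core query parameters as the tool above
    url = "https://api.openweathermap.org/data/2.5/weather"
    params = {"q": city, "appid": api_key, "units": units}
    response = requests.get(url, params=params)
    response.raise_for_status()  # surface HTTP errors instead of parsing bad JSON
    return response.json()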
@@ -35,20 +35,20 @@ def sha256base64(data):
     return digest


-def parse_url(requset_url):
-    stidx = requset_url.index("://")
-    host = requset_url[stidx + 3 :]
-    schema = requset_url[: stidx + 3]
+def parse_url(request_url):
+    stidx = request_url.index("://")
+    host = request_url[stidx + 3 :]
+    schema = request_url[: stidx + 3]
     edidx = host.index("/")
     if edidx <= 0:
-        raise AssembleHeaderException("invalid request url:" + requset_url)
+        raise AssembleHeaderException("invalid request url:" + request_url)
     path = host[edidx:]
     host = host[:edidx]
     u = Url(host, path, schema)
     return u

-def assemble_ws_auth_url(requset_url, method="GET", api_key="", api_secret=""):
-    u = parse_url(requset_url)
+def assemble_ws_auth_url(request_url, method="GET", api_key="", api_secret=""):
+    u = parse_url(request_url)
     host = u.host
     path = u.path
     now = datetime.now()
@@ -69,7 +69,7 @@ def assemble_ws_auth_url(requset_url, method="GET", api_key="", api_secret=""):
     )
     values = {"host": host, "date": date, "authorization": authorization}

-    return requset_url + "?" + urlencode(values)
+    return request_url + "?" + urlencode(values)


 def get_body(appid, text):
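The renamed `parse_url` splits a request URL into schema, host, and path by hand rather than via `urllib.parse`. A self-contained sketch of the same logic — `Url` and `AssembleHeaderException` are minimal stand-ins for the module's own classes, which this diff does not show:

from collections import namedtuple

Url = namedtuple("Url", ["host", "path", "schema"])  # stand-in for the module's Url class

class AssembleHeaderException(Exception):  # stand-in for the module's exception
    pass

def parse_url(request_url: str) -> Url:
    stidx = request_url.index("://")
    host = request_url[stidx + 3:]       # everything after "://"
    schema = request_url[: stidx + 3]    # e.g. "wss://"
    edidx = host.index("/")              # raises ValueError if there is no path at all
    if edidx <= 0:
        raise AssembleHeaderException("invalid request url:" + request_url)
    path = host[edidx:]
    host = host[:edidx]
    return Url(host, path, schema)

u = parse_url("wss://spark-api.xf-yun.com/v1.1/chat")
print(u.host, u.path, u.schema)  # spark-api.xf-yun.com /v1.1/chat wss://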
@@ -42,6 +42,6 @@ class ScrapeTool(BuiltinTool):
                 result += "URL: " + i.get('url', '') + "\n"
                 result += "CONTENT: " + i.get('content', '') + "\n\n"
         except Exception as e:
-            return self.create_text_message("An error occured", str(e))
+            return self.create_text_message("An error occurred", str(e))

         return self.create_text_message(result)
@@ -8,7 +8,7 @@ from core.model_manager import ModelManager
 from core.model_runtime.entities.model_entities import ModelType
 from core.rag.datasource.retrieval_service import RetrievalService
 from core.rag.rerank.rerank_model import RerankModelRunner
-from core.rag.retrieval.retrival_methods import RetrievalMethod
+from core.rag.retrieval.retrieval_methods import RetrievalMethod
 from core.tools.tool.dataset_retriever.dataset_retriever_base_tool import DatasetRetrieverBaseTool
 from extensions.ext_database import db
 from models.dataset import Dataset, Document, DocumentSegment
@@ -163,7 +163,7 @@ class DatasetMultiRetrieverTool(DatasetRetrieverBaseTool):

         if dataset.indexing_technique == "economy":
             # use keyword table query
-            documents = RetrievalService.retrieve(retrival_method='keyword_search',
+            documents = RetrievalService.retrieve(retrieval_method='keyword_search',
                                                   dataset_id=dataset.id,
                                                   query=query,
                                                   top_k=self.top_k
@@ -173,7 +173,7 @@ class DatasetMultiRetrieverTool(DatasetRetrieverBaseTool):
         else:
             if self.top_k > 0:
                 # retrieval source
-                documents = RetrievalService.retrieve(retrival_method=retrieval_model['search_method'],
+                documents = RetrievalService.retrieve(retrieval_method=retrieval_model['search_method'],
                                                       dataset_id=dataset.id,
                                                       query=query,
                                                       top_k=self.top_k,
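The same keyword rename recurs in four call sites across two files (two more follow below); only the keyword name changes, never the call shape. A toy stand-in showing why the misspelled keyword fails fast once the service signature is spelled correctly (the function body here is illustrative only, not the real RetrievalService):

def retrieve(retrieval_method: str, dataset_id: str, query: str, top_k: int) -> list:
    # stand-in with the corrected parameter name
    return [f"{retrieval_method}:{query}"][:top_k]

# A stale call site using the old misspelling raises immediately:
try:
    retrieve(retrival_method='keyword_search', dataset_id='d1', query='q', top_k=2)
except TypeError as e:
    print(e)  # retrieve() got an unexpected keyword argument 'retrival_method'

print(retrieve(retrieval_method='keyword_search', dataset_id='d1', query='q', top_k=2))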
@@ -2,7 +2,7 @@
 from pydantic import BaseModel, Field

 from core.rag.datasource.retrieval_service import RetrievalService
-from core.rag.retrieval.retrival_methods import RetrievalMethod
+from core.rag.retrieval.retrieval_methods import RetrievalMethod
 from core.tools.tool.dataset_retriever.dataset_retriever_base_tool import DatasetRetrieverBaseTool
 from extensions.ext_database import db
 from models.dataset import Dataset, Document, DocumentSegment
@@ -63,7 +63,7 @@ class DatasetRetrieverTool(DatasetRetrieverBaseTool):
         retrieval_model = dataset.retrieval_model if dataset.retrieval_model else default_retrieval_model
         if dataset.indexing_technique == "economy":
             # use keyword table query
-            documents = RetrievalService.retrieve(retrival_method='keyword_search',
+            documents = RetrievalService.retrieve(retrieval_method='keyword_search',
                                                   dataset_id=dataset.id,
                                                   query=query,
                                                   top_k=self.top_k
@@ -72,7 +72,7 @@ class DatasetRetrieverTool(DatasetRetrieverBaseTool):
         else:
             if self.top_k > 0:
                 # retrieval source
-                documents = RetrievalService.retrieve(retrival_method=retrieval_model.get('search_method', 'semantic_search'),
+                documents = RetrievalService.retrieve(retrieval_method=retrieval_model.get('search_method', 'semantic_search'),
                                                       dataset_id=dataset.id,
                                                       query=query,
                                                       top_k=self.top_k,
@@ -17,7 +17,7 @@ from core.tools.tool.tool import Tool


 class DatasetRetrieverTool(Tool):
-    retrival_tool: DatasetRetrieverBaseTool
+    retrieval_tool: DatasetRetrieverBaseTool

     @staticmethod
     def get_dataset_tools(tenant_id: str,
@@ -42,7 +42,7 @@ class DatasetRetrieverTool(Tool):
         # Agent only support SINGLE mode
         original_retriever_mode = retrieve_config.retrieve_strategy
         retrieve_config.retrieve_strategy = DatasetRetrieveConfigEntity.RetrieveStrategy.SINGLE
-        retrival_tools = feature.to_dataset_retriever_tool(
+        retrieval_tools = feature.to_dataset_retriever_tool(
             tenant_id=tenant_id,
             dataset_ids=dataset_ids,
             retrieve_config=retrieve_config,
@@ -53,17 +53,17 @@ class DatasetRetrieverTool(Tool):
         # restore retrieve strategy
         retrieve_config.retrieve_strategy = original_retriever_mode

-        # convert retrival tools to Tools
+        # convert retrieval tools to Tools
         tools = []
-        for retrival_tool in retrival_tools:
+        for retrieval_tool in retrieval_tools:
             tool = DatasetRetrieverTool(
-                retrival_tool=retrival_tool,
-                identity=ToolIdentity(provider='', author='', name=retrival_tool.name, label=I18nObject(en_US='', zh_Hans='')),
+                retrieval_tool=retrieval_tool,
+                identity=ToolIdentity(provider='', author='', name=retrieval_tool.name, label=I18nObject(en_US='', zh_Hans='')),
                 parameters=[],
                 is_team_authorization=True,
                 description=ToolDescription(
                     human=I18nObject(en_US='', zh_Hans=''),
-                    llm=retrival_tool.description),
+                    llm=retrieval_tool.description),
                 runtime=DatasetRetrieverTool.Runtime()
             )

@@ -95,7 +95,7 @@ class DatasetRetrieverTool(Tool):
             return self.create_text_message(text='please input query')

         # invoke dataset retriever tool
-        result = self.retrival_tool._run(query=query)
+        result = self.retrieval_tool._run(query=query)

         return self.create_text_message(text=result)

@@ -189,8 +189,8 @@ def extract_text_blocks_as_plain_text(paragraph_html):


 def plain_text_leaf_node(element):
-    # Extract all text, stripped of any child HTML elements and normalise it
-    plain_text = normalise_text(element.get_text())
+    # Extract all text, stripped of any child HTML elements and normalize it
+    plain_text = normalize_text(element.get_text())
     if plain_text != "" and element.name == "li":
         plain_text = "* {}, ".format(plain_text)
     if plain_text == "":
@@ -231,8 +231,8 @@ def plain_element(element, content_digests, node_indexes):
         # For leaf node elements, extract the text content, discarding any HTML tags
         # 1. Get element contents as text
         plain_text = element.get_text()
-        # 2. Normalise the extracted text string to a canonical representation
-        plain_text = normalise_text(plain_text)
+        # 2. Normalize the extracted text string to a canonical representation
+        plain_text = normalize_text(plain_text)
         # 3. Update element content to be plain text
         element.string = plain_text
     elif is_text(element):
@@ -243,7 +243,7 @@ def plain_element(element, content_digests, node_indexes):
             element = type(element)("")
         else:
             plain_text = element.string
-            plain_text = normalise_text(plain_text)
+            plain_text = normalize_text(plain_text)
             element = type(element)(plain_text)
     else:
         # If not a leaf node or leaf type call recursively on child nodes, replacing
@@ -267,12 +267,12 @@ def add_node_indexes(element, node_index="0"):
     return element


-def normalise_text(text):
-    """Normalise unicode and whitespace."""
-    # Normalise unicode first to try and standardise whitespace characters as much as possible before normalising them
+def normalize_text(text):
+    """Normalize unicode and whitespace."""
+    # Normalize unicode first to try and standardize whitespace characters as much as possible before normalizing them
     text = strip_control_characters(text)
-    text = normalise_unicode(text)
-    text = normalise_whitespace(text)
+    text = normalize_unicode(text)
+    text = normalize_whitespace(text)
     return text


@@ -291,14 +291,14 @@ def strip_control_characters(text):
     return "".join(["" if (unicodedata.category(char) in control_chars) and (char not in retained_chars) else char for char in text])


-def normalise_unicode(text):
-    """Normalise unicode such that things that are visually equivalent map to the same unicode string where possible."""
+def normalize_unicode(text):
+    """Normalize unicode such that things that are visually equivalent map to the same unicode string where possible."""
     normal_form = "NFKC"
     text = unicodedata.normalize(normal_form, text)
     return text


-def normalise_whitespace(text):
+def normalize_whitespace(text):
     """Replace runs of whitespace characters with a single space as this is what happens when HTML text is displayed."""
     text = regex.sub(r"\s+", " ", text)
     # Remove leading and trailing whitespace
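Taken together, the renamed helpers form a small pipeline: strip control characters, NFKC-normalize, collapse whitespace. A compressed, standard-library approximation of the three helpers (using `re` in place of the module's `regex`, which behaves the same for this pattern; the retained control characters here are an assumption):

import re
import unicodedata

def normalize_text(text: str) -> str:
    """Normalize unicode and whitespace, mirroring the pipeline above."""
    # Drop control characters (Unicode category 'C*'), keeping common whitespace
    text = "".join(ch for ch in text
                   if not unicodedata.category(ch).startswith("C") or ch in "\t\n\r")
    # NFKC maps visually equivalent code points to a canonical form,
    # e.g. fullwidth 'Ａ' (U+FF21) becomes plain 'A'
    text = unicodedata.normalize("NFKC", text)
    # Collapse runs of whitespace to a single space, as rendered HTML would
    text = re.sub(r"\s+", " ", text)
    return text.strip()

print(normalize_text("ＡＢＣ\u200b  hello\n\tworld"))  # "ABC hello world"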