Chore: change dataset's i18n to knowledge (#1629)

Author: Joel
Date: 2023-11-27 17:22:16 +08:00
Committed by: GitHub
Parent: 80ddb00f10
Commit: 7bbfac5dba
18 changed files with 256 additions and 256 deletions
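
Only the displayed English copy changes in this rename; the translation keys and the code that looks them up stay the same. As a minimal sketch (assuming an i18next-style setup; the 'datasetCreation' namespace and init wiring are illustrative, not taken from this commit), a renamed string such as steps.header.creation resolves like this:

// Minimal sketch, assuming an i18next-style lookup; the namespace name and
// init wiring are illustrative. Only the key path and the string value come
// from the diff below.
import i18next from 'i18next'

const datasetCreation = {
  steps: {
    header: {
      creation: 'Create Knowledge', // was 'Create Dataset' before this commit
    },
  },
}

i18next
  .init({
    lng: 'en',
    resources: { en: { translation: { datasetCreation } } },
  })
  .then(() => {
    // Callers keep the same key path; only the rendered text changes.
    console.log(i18next.t('datasetCreation.steps.header.creation')) // 'Create Knowledge'
  })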

@@ -1,7 +1,7 @@
 const translation = {
   steps: {
     header: {
-      creation: 'Create Dataset',
+      creation: 'Create Knowledge',
       update: 'Add data',
     },
     one: 'Choose data source',
@@ -9,7 +9,7 @@ const translation = {
     three: 'Execute and finish',
   },
   error: {
-    unavailable: 'This dataset is not avaliable',
+    unavailable: 'This Knowledge is not avaliable',
   },
   stepOne: {
     filePreview: 'File Preview',
@@ -37,11 +37,11 @@ const translation = {
     notionSyncTip: 'To sync with Notion, connection to Notion must be established first.',
     connect: 'Go to connect',
     button: 'next',
-    emptyDatasetCreation: 'I want to create an empty dataset',
+    emptyDatasetCreation: 'I want to create an empty Knowledge',
     modal: {
-      title: 'Create an empty dataset',
-      tip: 'An empty dataset will contain no documents, and you can upload documents any time.',
-      input: 'Dataset name',
+      title: 'Create an empty Knowledge',
+      tip: 'An empty Knowledge will contain no documents, and you can upload documents any time.',
+      input: 'Knowledge name',
       placeholder: 'Please input',
       nameNotEmpty: 'Name cannot be empty',
       nameLengthInvaild: 'Name must be between 1 to 40 characters',
@@ -52,14 +52,14 @@ const translation = {
     overCountLimit: 'All your documents have overed limit {{countLimit}}.',
   },
   stepTwo: {
-    segmentation: 'Segmentation settings',
+    segmentation: 'Chunk settings',
     auto: 'Automatic',
-    autoDescription: 'Automatically set segmentation and preprocessing rules. Unfamiliar users are recommended to select this.',
+    autoDescription: 'Automatically set chunk and preprocessing rules. Unfamiliar users are recommended to select this.',
     custom: 'Custom',
-    customDescription: 'Customize segmentation rules, segmentation length, and preprocessing rules, etc.',
+    customDescription: 'Customize chunks rules, chunks length, and preprocessing rules, etc.',
     separator: 'Segment identifier',
     separatorPlaceholder: 'For example, newline (\\\\n) or special separator (such as "***")',
-    maxLength: 'Maximum segment length',
+    maxLength: 'Maximum chunk length',
     rules: 'Text preprocessing rules',
     removeExtraSpaces: 'Replace consecutive spaces, newlines and tabs',
     removeUrlEmails: 'Delete all URLs and email addresses',
@@ -78,8 +78,8 @@ const translation = {
     QATip: 'Enable this option will consume more tokens',
     QALanguage: 'Segment using',
     emstimateCost: 'Estimation',
-    emstimateSegment: 'Estimated segments',
-    segmentCount: 'segments',
+    emstimateSegment: 'Estimated chunks',
+    segmentCount: 'chunks',
     calculating: 'Calculating...',
     fileSource: 'Preprocess documents',
     notionSource: 'Preprocess pages',
@@ -90,33 +90,33 @@ const translation = {
     nextStep: 'Save & Process',
     save: 'Save & Process',
     cancel: 'Cancel',
-    sideTipTitle: 'Why segment and preprocess?',
-    sideTipP1: 'When processing text data, segmentation and cleaning are two important preprocessing steps.',
+    sideTipTitle: 'Why chunk and preprocess?',
+    sideTipP1: 'When processing text data, chunk and cleaning are two important preprocessing steps.',
     sideTipP2: 'Segmentation splits long text into paragraphs so models can understand better. This improves the quality and relevance of model results.',
-    sideTipP3: 'Cleaning removes unnecessary characters and formats, making datasets cleaner and easier to parse.',
-    sideTipP4: 'Proper segmentation and cleaning improve model performance, providing more accurate and valuable results.',
+    sideTipP3: 'Cleaning removes unnecessary characters and formats, making Knowledge cleaner and easier to parse.',
+    sideTipP4: 'Proper chunk and cleaning improve model performance, providing more accurate and valuable results.',
     previewTitle: 'Preview',
     previewTitleButton: 'Preview',
     previewButton: 'Switching to Q&A format',
-    previewSwitchTipStart: 'The current segment preview is in text format, switching to a question-and-answer format preview will',
+    previewSwitchTipStart: 'The current chunk preview is in text format, switching to a question-and-answer format preview will',
     previewSwitchTipEnd: ' consume additional tokens',
     characters: 'characters',
     indexSettedTip: 'To change the index method, please go to the ',
     retrivalSettedTip: 'To change the index method, please go to the ',
-    datasetSettingLink: 'dataset settings.',
+    datasetSettingLink: 'Knowledge settings.',
   },
   stepThree: {
-    creationTitle: '🎉 Dataset created',
-    creationContent: 'We automatically named the dataset, you can modify it at any time',
-    label: 'Dataset name',
+    creationTitle: '🎉 Knowledge created',
+    creationContent: 'We automatically named the Knowledge, you can modify it at any time',
+    label: 'Knowledge name',
     additionTitle: '🎉 Document uploaded',
-    additionP1: 'The document has been uploaded to the dataset',
-    additionP2: ', you can find it in the document list of the dataset。',
+    additionP1: 'The document has been uploaded to the Knowledge',
+    additionP2: ', you can find it in the document list of the Knowledge.',
     stop: 'Stop processing',
     resume: 'Resume processing',
     navTo: 'Go to document',
     sideTipTitle: 'What\'s next',
-    sideTipContent: 'After the document finishes indexing, the dataset can be integrated into the application as context, you can find the context setting in the prompt orchestration page. You can also create it as an independent ChatGPT indexing plugin for release.',
+    sideTipContent: 'After the document finishes indexing, the Knowledge can be integrated into the application as context, you can find the context setting in the prompt orchestration page. You can also create it as an independent ChatGPT indexing plugin for release.',
     modelTitle: 'Are you sure to stop embedding?',
     modelContent: 'If you need to resume processing later, you will continue from where you left off.',
     modelButtonConfirm: 'Confirm',