# SOME DESCRIPTIVE TITLE.
# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
# This file is distributed under the same license as the PACKAGE package.
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
#
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2025-04-29 14:50+0800\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
"Language: \n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"

#: common/auth/authenticate.py:80
msgid "Not logged in, please log in first"
msgstr ""

#: common/auth/authenticate.py:82 common/auth/authenticate.py:89
#: common/auth/authenticate.py:95
msgid "Authentication information is incorrect! illegal user"
msgstr ""

#: common/auth/authentication.py:96
msgid "No permission to access"
msgstr ""

#: common/auth/handle/impl/user_token.py:242
msgid "Login expired"
msgstr ""

#: common/constants/exception_code_constants.py:31
#: users/serializers/login.py:53
msgid "The username or password is incorrect"
msgstr ""

#: common/constants/exception_code_constants.py:32
msgid "Please log in first and bring the user Token"
msgstr ""

#: common/constants/exception_code_constants.py:33
msgid "Email sending failed"
msgstr ""

#: common/constants/exception_code_constants.py:34
msgid "Email format error"
msgstr ""

#: common/constants/exception_code_constants.py:35
msgid "The email has been registered, please log in directly"
msgstr ""

#: common/constants/exception_code_constants.py:36
msgid "The email is not registered, please register first"
msgstr ""

#: common/constants/exception_code_constants.py:38
msgid "The verification code is incorrect or the verification code has expired"
msgstr ""

#: common/constants/exception_code_constants.py:39
msgid "The username has been registered, please log in directly"
msgstr ""

#: common/constants/exception_code_constants.py:41
msgid ""
"The username cannot be empty and must be between 6 and 20 characters long."
msgstr "" #: common/constants/exception_code_constants.py:43 msgid "Password and confirmation password are inconsistent" msgstr "" #: common/event/__init__.py:27 msgid "The download process was interrupted, please try again" msgstr "" #: common/event/listener_manage.py:90 #, python-brace-format msgid "Query vector data: {paragraph_id_list} error {error} {traceback}" msgstr "" #: common/event/listener_manage.py:95 #, python-brace-format msgid "Start--->Embedding paragraph: {paragraph_id_list}" msgstr "" #: common/event/listener_manage.py:107 #, python-brace-format msgid "Vectorized paragraph: {paragraph_id_list} error {error} {traceback}" msgstr "" #: common/event/listener_manage.py:113 #, python-brace-format msgid "End--->Embedding paragraph: {paragraph_id_list}" msgstr "" #: common/event/listener_manage.py:122 #, python-brace-format msgid "Start--->Embedding paragraph: {paragraph_id}" msgstr "" #: common/event/listener_manage.py:147 #, python-brace-format msgid "Vectorized paragraph: {paragraph_id} error {error} {traceback}" msgstr "" #: common/event/listener_manage.py:152 #, python-brace-format msgid "End--->Embedding paragraph: {paragraph_id}" msgstr "" #: common/event/listener_manage.py:268 #, python-brace-format msgid "Start--->Embedding document: {document_id}" msgstr "" #: common/event/listener_manage.py:288 #, python-brace-format msgid "Vectorized document: {document_id} error {error} {traceback}" msgstr "" #: common/event/listener_manage.py:293 #, python-brace-format msgid "End--->Embedding document: {document_id}" msgstr "" #: common/event/listener_manage.py:304 #, python-brace-format msgid "Start--->Embedding knowledge: {knowledge_id}" msgstr "" #: common/event/listener_manage.py:308 #, python-brace-format msgid "Start--->Embedding document: {document_list}" msgstr "" #: common/event/listener_manage.py:312 knowledge/task/embedding.py:116 #, python-brace-format msgid "Vectorized knowledge: {knowledge_id} error {error} {traceback}" msgstr "" #: common/event/listener_manage.py:315 #, python-brace-format msgid "End--->Embedding knowledge: {knowledge_id}" msgstr "" #: common/exception/handle_exception.py:32 msgid "Unknown exception" msgstr "" #: common/forms/base_field.py:64 #, python-brace-format msgid "The field {field_label} is required" msgstr "" #: common/forms/slider_field.py:56 #, python-brace-format msgid "The {field_label} cannot be less than {min}" msgstr "" #: common/forms/slider_field.py:62 #, python-brace-format msgid "The {field_label} cannot be greater than {max}" msgstr "" #: common/result/api.py:17 common/result/api.py:27 msgid "response code" msgstr "" #: common/result/api.py:18 common/result/api.py:19 common/result/api.py:28 #: common/result/api.py:29 msgid "error prompt" msgstr "" #: common/result/api.py:43 msgid "total number of data" msgstr "" #: common/result/api.py:44 msgid "current page" msgstr "" #: common/result/api.py:45 msgid "page size" msgstr "" #: common/result/result.py:31 msgid "Success" msgstr "" #: common/utils/common.py:85 msgid "Text-to-speech node, the text content must be of string type" msgstr "" #: common/utils/common.py:87 msgid "Text-to-speech node, the text content cannot be empty" msgstr "" #: common/utils/common.py:239 #, python-brace-format msgid "Limit {count} exceeded, please contact us (https://fit2cloud.com/)." 
msgstr "" #: folders/models/folder.py:6 folders/models/folder.py:13 #: folders/serializers/folder.py:86 msgid "folder name" msgstr "" #: folders/models/folder.py:9 folders/models/folder.py:15 #: folders/serializers/folder.py:89 msgid "parent id" msgstr "" #: folders/serializers/folder.py:63 msgid "Folder depth cannot exceed 3 levels" msgstr "" #: folders/serializers/folder.py:85 folders/serializers/folder.py:121 #: knowledge/serializers/knowledge.py:27 knowledge/serializers/knowledge.py:34 #: tools/serializers/tool.py:339 msgid "folder id" msgstr "" #: folders/serializers/folder.py:87 msgid "folder user id" msgstr "" #: folders/serializers/folder.py:88 folders/serializers/folder.py:122 #: folders/serializers/folder.py:166 knowledge/serializers/knowledge.py:44 #: models_provider/api/model.py:40 models_provider/api/model.py:53 #: models_provider/serializers/model_serializer.py:262 #: models_provider/serializers/model_serializer.py:326 #: tools/serializers/tool.py:169 tools/serializers/tool.py:190 #: tools/serializers/tool.py:248 tools/serializers/tool.py:292 #: tools/serializers/tool.py:322 tools/serializers/tool.py:338 msgid "workspace id" msgstr "" #: folders/serializers/folder.py:92 knowledge/serializers/knowledge.py:43 #: models_provider/serializers/model_serializer.py:108 #: models_provider/serializers/model_serializer.py:215 #: models_provider/serializers/model_serializer.py:255 #: tools/serializers/tool.py:168 tools/serializers/tool.py:189 msgid "user id" msgstr "" #: folders/serializers/folder.py:93 folders/serializers/folder.py:123 #: folders/serializers/folder.py:167 tools/serializers/tool.py:97 msgid "source" msgstr "" #: folders/serializers/folder.py:106 msgid "Folder name already exists" msgstr "" #: folders/serializers/folder.py:132 msgid "Folder does not exist" msgstr "" #: folders/serializers/folder.py:160 msgid "Cannot delete root folder" msgstr "" #: folders/views/folder.py:19 folders/views/folder.py:20 msgid "Create folder" msgstr "" #: folders/views/folder.py:24 folders/views/folder.py:41 #: folders/views/folder.py:60 folders/views/folder.py:75 #: folders/views/folder.py:90 msgid "Folder" msgstr "" #: folders/views/folder.py:37 folders/views/folder.py:38 msgid "Get folder tree" msgstr "" #: folders/views/folder.py:55 folders/views/folder.py:56 msgid "Update folder" msgstr "" #: folders/views/folder.py:71 folders/views/folder.py:72 msgid "Get folder" msgstr "" #: folders/views/folder.py:86 folders/views/folder.py:87 msgid "Delete folder" msgstr "" #: knowledge/serializers/common.py:98 knowledge/serializers/knowledge.py:37 msgid "source url" msgstr "" #: knowledge/serializers/common.py:99 msgid "selector" msgstr "" #: knowledge/serializers/common.py:106 #, python-brace-format msgid "URL error, cannot parse [{source_url}]" msgstr "" #: knowledge/serializers/common.py:114 msgid "id list" msgstr "" #: knowledge/serializers/common.py:124 #, python-brace-format msgid "The following id does not exist: {error_id_list}" msgstr "" #: knowledge/serializers/common.py:181 knowledge/serializers/common.py:205 msgid "The knowledge base is inconsistent with the vector model" msgstr "" #: knowledge/serializers/common.py:183 knowledge/serializers/common.py:207 msgid "Knowledge base setting error, please reset the knowledge base" msgstr "" #: knowledge/serializers/common.py:212 msgid "Model id" msgstr "" #: knowledge/serializers/common.py:213 msgid "Prompt word" msgstr "" #: knowledge/serializers/common.py:215 msgid "state list" msgstr "" #: knowledge/serializers/document.py:26 msgid 
"document name" msgstr "" #: knowledge/serializers/document.py:31 knowledge/serializers/knowledge.py:26 #: knowledge/serializers/knowledge.py:33 msgid "knowledge name" msgstr "" #: knowledge/serializers/document.py:32 knowledge/serializers/knowledge.py:28 #: knowledge/serializers/knowledge.py:35 msgid "knowledge description" msgstr "" #: knowledge/serializers/document.py:33 msgid "embedding model" msgstr "" #: knowledge/serializers/document.py:39 knowledge/serializers/document.py:90 #: knowledge/serializers/paragraph.py:58 knowledge/serializers/paragraph.py:150 msgid "document id" msgstr "" #: knowledge/serializers/document.py:40 knowledge/serializers/paragraph.py:149 msgid "knowledge id" msgstr "" #: knowledge/serializers/document.py:46 msgid "document id not exist" msgstr "" #: knowledge/serializers/document.py:71 #: models_provider/serializers/model_serializer.py:116 #: models_provider/serializers/model_serializer.py:132 #: models_provider/serializers/model_serializer.py:151 #: models_provider/serializers/model_serializer.py:178 #: models_provider/serializers/model_serializer.py:373 #: models_provider/tools.py:111 msgid "Model does not exist" msgstr "" #: knowledge/serializers/document.py:73 msgid "No permission to use this model" msgstr "" #: knowledge/serializers/document.py:87 msgid "The task is being executed, please do not send it repeatedly." msgstr "" #: knowledge/serializers/document.py:95 msgid "knowledge id not exist" msgstr "" #: knowledge/serializers/knowledge.py:29 knowledge/serializers/knowledge.py:36 msgid "knowledge embedding" msgstr "" #: knowledge/serializers/knowledge.py:38 msgid "knowledge selector" msgstr "" #: knowledge/serializers/knowledge.py:55 msgid "" "The community version supports up to 50 knowledge bases. If you need more " "knowledge bases, please contact us (https://fit2cloud.com/)." msgstr "" #: knowledge/serializers/knowledge.py:64 knowledge/serializers/knowledge.py:123 msgid "Knowledge base name duplicate!" 
msgstr "" #: knowledge/serializers/paragraph.py:31 knowledge/serializers/problem.py:15 msgid "content" msgstr "" #: knowledge/serializers/paragraph.py:33 knowledge/serializers/paragraph.py:40 #: knowledge/serializers/paragraph.py:43 knowledge/serializers/paragraph.py:48 #: knowledge/serializers/paragraph.py:50 msgid "section title" msgstr "" #: knowledge/serializers/paragraph.py:36 tools/serializers/tool.py:127 #: tools/serializers/tool.py:147 msgid "Is active" msgstr "" #: knowledge/serializers/paragraph.py:54 msgid "paragraph id" msgstr "" #: knowledge/serializers/paragraph.py:56 msgid "dataset id" msgstr "" #: knowledge/serializers/paragraph.py:63 msgid "Paragraph id does not exist" msgstr "" #: knowledge/serializers/paragraph.py:99 msgid "Problem id does not exist" msgstr "" #: knowledge/serializers/paragraph.py:156 msgid "The document id is incorrect" msgstr "" #: knowledge/serializers/problem.py:14 msgid "problem id" msgstr "" #: knowledge/task/embedding.py:24 knowledge/task/embedding.py:74 #, python-brace-format msgid "Failed to obtain vector model: {error} {traceback}" msgstr "" #: knowledge/task/embedding.py:103 #, python-brace-format msgid "Start--->Vectorized knowledge: {knowledge_id}" msgstr "" #: knowledge/task/embedding.py:107 #, python-brace-format msgid "Knowledge documentation: {document_names}" msgstr "" #: knowledge/task/embedding.py:120 #, python-brace-format msgid "End--->Vectorized knowledge: {knowledge_id}" msgstr "" #: knowledge/task/handler.py:107 #, python-brace-format msgid "Association problem failed {error}" msgstr "" #: knowledge/task/sync.py:29 knowledge/task/sync.py:44 #, python-brace-format msgid "Start--->Start synchronization web knowledge base:{knowledge_id}" msgstr "" #: knowledge/task/sync.py:34 knowledge/task/sync.py:48 #, python-brace-format msgid "End--->End synchronization web knowledge base:{knowledge_id}" msgstr "" #: knowledge/task/sync.py:36 knowledge/task/sync.py:50 #, python-brace-format msgid "Synchronize web knowledge base:{knowledge_id} error{error}{traceback}" msgstr "" #: knowledge/views/knowledge.py:19 knowledge/views/knowledge.py:20 msgid "Get knowledge by folder" msgstr "" #: knowledge/views/knowledge.py:23 knowledge/views/knowledge.py:42 #: knowledge/views/knowledge.py:61 msgid "Knowledge Base" msgstr "" #: knowledge/views/knowledge.py:37 knowledge/views/knowledge.py:38 msgid "Create base knowledge" msgstr "" #: knowledge/views/knowledge.py:56 knowledge/views/knowledge.py:57 msgid "Create web knowledge" msgstr "" #: maxkb/settings/base.py:85 msgid "Intelligent customer service platform" msgstr "" #: models_provider/api/model.py:59 #: models_provider/serializers/model_serializer.py:107 #: models_provider/serializers/model_serializer.py:367 msgid "model id" msgstr "" #: models_provider/api/provide.py:17 models_provider/api/provide.py:23 #: models_provider/api/provide.py:28 models_provider/api/provide.py:30 #: models_provider/api/provide.py:82 #: models_provider/serializers/model_serializer.py:40 #: models_provider/serializers/model_serializer.py:218 #: models_provider/serializers/model_serializer.py:256 #: models_provider/serializers/model_serializer.py:321 msgid "model name" msgstr "" #: models_provider/api/provide.py:18 models_provider/api/provide.py:38 #: models_provider/api/provide.py:76 models_provider/api/provide.py:104 #: models_provider/api/provide.py:126 #: models_provider/serializers/model_serializer.py:41 #: models_provider/serializers/model_serializer.py:257 #: models_provider/serializers/model_serializer.py:324 msgid 
"provider" msgstr "" #: models_provider/api/provide.py:19 msgid "icon" msgstr "" #: models_provider/api/provide.py:24 msgid "value" msgstr "" #: models_provider/api/provide.py:29 models_provider/api/provide.py:70 #: models_provider/api/provide.py:98 #: models_provider/serializers/model_serializer.py:42 #: models_provider/serializers/model_serializer.py:220 #: models_provider/serializers/model_serializer.py:258 #: models_provider/serializers/model_serializer.py:322 msgid "model type" msgstr "" #: models_provider/api/provide.py:34 tools/serializers/tool.py:107 msgid "input type" msgstr "" #: models_provider/api/provide.py:35 msgid "label" msgstr "" #: models_provider/api/provide.py:36 msgid "text field" msgstr "" #: models_provider/api/provide.py:37 msgid "value field" msgstr "" #: models_provider/api/provide.py:39 msgid "method" msgstr "" #: models_provider/api/provide.py:40 tools/serializers/tool.py:92 #: tools/serializers/tool.py:106 msgid "required" msgstr "" #: models_provider/api/provide.py:41 msgid "default value" msgstr "" #: models_provider/api/provide.py:42 msgid "relation show field dict" msgstr "" #: models_provider/api/provide.py:43 msgid "relation trigger field dict" msgstr "" #: models_provider/api/provide.py:44 msgid "trigger type" msgstr "" #: models_provider/api/provide.py:45 msgid "attrs" msgstr "" #: models_provider/api/provide.py:46 msgid "props info" msgstr "" #: models_provider/base_model_provider.py:60 msgid "Model type cannot be empty" msgstr "" #: models_provider/base_model_provider.py:85 msgid "The current platform does not support downloading models" msgstr "" #: models_provider/base_model_provider.py:143 msgid "LLM" msgstr "" #: models_provider/base_model_provider.py:144 msgid "Embedding Model" msgstr "" #: models_provider/base_model_provider.py:145 msgid "Speech2Text" msgstr "" #: models_provider/base_model_provider.py:146 msgid "TTS" msgstr "" #: models_provider/base_model_provider.py:147 msgid "Vision Model" msgstr "" #: models_provider/base_model_provider.py:148 msgid "Image Generation" msgstr "" #: models_provider/base_model_provider.py:149 msgid "Rerank" msgstr "" #: models_provider/base_model_provider.py:223 msgid "The model does not support" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:42 msgid "" "With the GTE-Rerank text sorting series model developed by Alibaba Tongyi " "Lab, developers can integrate high-quality text retrieval and sorting " "through the LlamaIndex framework." msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:45 msgid "" "Chinese (including various dialects such as Cantonese), English, Japanese, " "and Korean support free switching between multiple languages." msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:48 msgid "" "CosyVoice is based on a new generation of large generative speech models, " "which can predict emotions, intonation, rhythm, etc. based on context, and " "has better anthropomorphic effects." msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:51 msgid "" "Universal text vector is Tongyi Lab's multi-language text unified vector " "model based on the LLM base. It provides high-level vector services for " "multiple mainstream languages around the world and helps developers quickly " "convert text data into high-quality vector data." 
msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:69 msgid "" "Tongyi Wanxiang - a large image model for text generation, supports " "bilingual input in Chinese and English, and supports the input of reference " "pictures for reference content or reference style migration. Key styles " "include but are not limited to watercolor, oil painting, Chinese painting, " "sketch, flat illustration, two-dimensional, and 3D. Cartoon." msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:95 msgid "Alibaba Cloud Bailian" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/embedding.py:53 #: models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:50 #: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:74 #: models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:61 #: models_provider/impl/aliyun_bai_lian_model_provider/model/tti.py:43 #: models_provider/impl/aliyun_bai_lian_model_provider/model/tts.py:37 #: models_provider/impl/anthropic_model_provider/credential/image.py:33 #: models_provider/impl/anthropic_model_provider/credential/llm.py:57 #: models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:34 #: models_provider/impl/aws_bedrock_model_provider/credential/llm.py:53 #: models_provider/impl/azure_model_provider/credential/embedding.py:37 #: models_provider/impl/azure_model_provider/credential/image.py:40 #: models_provider/impl/azure_model_provider/credential/llm.py:69 #: models_provider/impl/deepseek_model_provider/credential/llm.py:57 #: models_provider/impl/gemini_model_provider/credential/embedding.py:36 #: models_provider/impl/gemini_model_provider/credential/image.py:32 #: models_provider/impl/gemini_model_provider/credential/llm.py:57 #: models_provider/impl/gemini_model_provider/model/stt.py:43 #: models_provider/impl/kimi_model_provider/credential/llm.py:57 #: models_provider/impl/local_model_provider/credential/embedding.py:36 #: models_provider/impl/local_model_provider/credential/reranker.py:37 #: models_provider/impl/ollama_model_provider/credential/embedding.py:37 #: models_provider/impl/ollama_model_provider/credential/reranker.py:44 #: models_provider/impl/openai_model_provider/credential/embedding.py:36 #: models_provider/impl/openai_model_provider/credential/image.py:35 #: models_provider/impl/openai_model_provider/credential/llm.py:59 #: models_provider/impl/siliconCloud_model_provider/credential/embedding.py:36 #: models_provider/impl/siliconCloud_model_provider/credential/image.py:35 #: models_provider/impl/siliconCloud_model_provider/credential/llm.py:58 #: models_provider/impl/siliconCloud_model_provider/credential/reranker.py:37 #: models_provider/impl/tencent_cloud_model_provider/credential/llm.py:58 #: models_provider/impl/tencent_model_provider/credential/embedding.py:23 #: models_provider/impl/tencent_model_provider/credential/image.py:37 #: models_provider/impl/tencent_model_provider/credential/llm.py:51 #: models_provider/impl/tencent_model_provider/model/tti.py:54 #: models_provider/impl/vllm_model_provider/credential/embedding.py:36 #: models_provider/impl/vllm_model_provider/credential/llm.py:50 #: models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:36 #: models_provider/impl/volcanic_engine_model_provider/credential/image.py:32 #: models_provider/impl/volcanic_engine_model_provider/credential/llm.py:57 #: 
models_provider/impl/volcanic_engine_model_provider/model/tts.py:77 #: models_provider/impl/wenxin_model_provider/credential/embedding.py:31 #: models_provider/impl/wenxin_model_provider/credential/llm.py:60 #: models_provider/impl/xf_model_provider/credential/embedding.py:31 #: models_provider/impl/xf_model_provider/credential/llm.py:76 #: models_provider/impl/xf_model_provider/model/tts.py:101 #: models_provider/impl/xinference_model_provider/credential/embedding.py:31 #: models_provider/impl/xinference_model_provider/credential/image.py:32 #: models_provider/impl/xinference_model_provider/credential/llm.py:50 #: models_provider/impl/xinference_model_provider/credential/reranker.py:34 #: models_provider/impl/xinference_model_provider/model/tts.py:44 #: models_provider/impl/zhipu_model_provider/credential/image.py:31 #: models_provider/impl/zhipu_model_provider/credential/llm.py:56 #: models_provider/impl/zhipu_model_provider/model/tti.py:49 msgid "Hello" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:36 #: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:60 #: models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:46 #: models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py:44 #: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:96 #: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:89 #: models_provider/impl/anthropic_model_provider/credential/image.py:23 #: models_provider/impl/anthropic_model_provider/credential/llm.py:47 #: models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:21 #: models_provider/impl/aws_bedrock_model_provider/credential/llm.py:40 #: models_provider/impl/azure_model_provider/credential/embedding.py:27 #: models_provider/impl/azure_model_provider/credential/image.py:30 #: models_provider/impl/azure_model_provider/credential/llm.py:59 #: models_provider/impl/azure_model_provider/credential/stt.py:23 #: models_provider/impl/azure_model_provider/credential/tti.py:58 #: models_provider/impl/azure_model_provider/credential/tts.py:41 #: models_provider/impl/deepseek_model_provider/credential/llm.py:47 #: models_provider/impl/gemini_model_provider/credential/embedding.py:26 #: models_provider/impl/gemini_model_provider/credential/image.py:22 #: models_provider/impl/gemini_model_provider/credential/llm.py:47 #: models_provider/impl/gemini_model_provider/credential/stt.py:21 #: models_provider/impl/kimi_model_provider/credential/llm.py:47 #: models_provider/impl/local_model_provider/credential/embedding.py:27 #: models_provider/impl/local_model_provider/credential/reranker.py:28 #: models_provider/impl/ollama_model_provider/credential/embedding.py:26 #: models_provider/impl/ollama_model_provider/credential/image.py:19 #: models_provider/impl/ollama_model_provider/credential/llm.py:44 #: models_provider/impl/ollama_model_provider/credential/reranker.py:27 #: models_provider/impl/ollama_model_provider/credential/reranker.py:31 #: models_provider/impl/openai_model_provider/credential/embedding.py:26 #: models_provider/impl/openai_model_provider/credential/image.py:25 #: models_provider/impl/openai_model_provider/credential/llm.py:48 #: models_provider/impl/openai_model_provider/credential/stt.py:22 #: models_provider/impl/openai_model_provider/credential/tti.py:61 #: models_provider/impl/openai_model_provider/credential/tts.py:40 #: models_provider/impl/siliconCloud_model_provider/credential/embedding.py:26 #: 
models_provider/impl/siliconCloud_model_provider/credential/image.py:25 #: models_provider/impl/siliconCloud_model_provider/credential/llm.py:47 #: models_provider/impl/siliconCloud_model_provider/credential/reranker.py:28 #: models_provider/impl/siliconCloud_model_provider/credential/stt.py:22 #: models_provider/impl/siliconCloud_model_provider/credential/tti.py:61 #: models_provider/impl/siliconCloud_model_provider/credential/tts.py:22 #: models_provider/impl/tencent_cloud_model_provider/credential/llm.py:47 #: models_provider/impl/tencent_model_provider/credential/embedding.py:19 #: models_provider/impl/tencent_model_provider/credential/image.py:28 #: models_provider/impl/tencent_model_provider/credential/llm.py:31 #: models_provider/impl/tencent_model_provider/credential/tti.py:78 #: models_provider/impl/vllm_model_provider/credential/embedding.py:26 #: models_provider/impl/vllm_model_provider/credential/image.py:22 #: models_provider/impl/vllm_model_provider/credential/llm.py:39 #: models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:26 #: models_provider/impl/volcanic_engine_model_provider/credential/image.py:22 #: models_provider/impl/volcanic_engine_model_provider/credential/llm.py:47 #: models_provider/impl/volcanic_engine_model_provider/credential/stt.py:25 #: models_provider/impl/volcanic_engine_model_provider/credential/tti.py:41 #: models_provider/impl/volcanic_engine_model_provider/credential/tts.py:51 #: models_provider/impl/wenxin_model_provider/credential/embedding.py:27 #: models_provider/impl/wenxin_model_provider/credential/llm.py:46 #: models_provider/impl/xf_model_provider/credential/embedding.py:27 #: models_provider/impl/xf_model_provider/credential/image.py:29 #: models_provider/impl/xf_model_provider/credential/llm.py:66 #: models_provider/impl/xf_model_provider/credential/stt.py:24 #: models_provider/impl/xf_model_provider/credential/tts.py:47 #: models_provider/impl/xinference_model_provider/credential/embedding.py:19 #: models_provider/impl/xinference_model_provider/credential/image.py:22 #: models_provider/impl/xinference_model_provider/credential/llm.py:39 #: models_provider/impl/xinference_model_provider/credential/reranker.py:25 #: models_provider/impl/xinference_model_provider/credential/stt.py:21 #: models_provider/impl/xinference_model_provider/credential/tti.py:59 #: models_provider/impl/xinference_model_provider/credential/tts.py:39 #: models_provider/impl/zhipu_model_provider/credential/image.py:21 #: models_provider/impl/zhipu_model_provider/credential/llm.py:47 #: models_provider/impl/zhipu_model_provider/credential/tti.py:40 #, python-brace-format msgid "{model_type} Model type is not supported" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:44 #: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:68 #: models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:55 #: models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py:53 #: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:105 #: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:98 #, python-brace-format msgid "{key} is required" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:60 #: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:82 #: models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:69 #: models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py:67 #: 
models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:121 #: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:113 #: models_provider/impl/anthropic_model_provider/credential/image.py:43 #: models_provider/impl/anthropic_model_provider/credential/llm.py:65 #: models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:42 #: models_provider/impl/aws_bedrock_model_provider/credential/llm.py:61 #: models_provider/impl/azure_model_provider/credential/image.py:50 #: models_provider/impl/azure_model_provider/credential/stt.py:40 #: models_provider/impl/azure_model_provider/credential/tti.py:77 #: models_provider/impl/azure_model_provider/credential/tts.py:58 #: models_provider/impl/deepseek_model_provider/credential/llm.py:65 #: models_provider/impl/gemini_model_provider/credential/embedding.py:43 #: models_provider/impl/gemini_model_provider/credential/image.py:42 #: models_provider/impl/gemini_model_provider/credential/llm.py:66 #: models_provider/impl/gemini_model_provider/credential/stt.py:38 #: models_provider/impl/kimi_model_provider/credential/llm.py:64 #: models_provider/impl/local_model_provider/credential/embedding.py:44 #: models_provider/impl/local_model_provider/credential/reranker.py:45 #: models_provider/impl/ollama_model_provider/credential/reranker.py:51 #: models_provider/impl/openai_model_provider/credential/embedding.py:43 #: models_provider/impl/openai_model_provider/credential/image.py:45 #: models_provider/impl/openai_model_provider/credential/llm.py:67 #: models_provider/impl/openai_model_provider/credential/stt.py:39 #: models_provider/impl/openai_model_provider/credential/tti.py:80 #: models_provider/impl/openai_model_provider/credential/tts.py:58 #: models_provider/impl/siliconCloud_model_provider/credential/embedding.py:43 #: models_provider/impl/siliconCloud_model_provider/credential/image.py:45 #: models_provider/impl/siliconCloud_model_provider/credential/llm.py:66 #: models_provider/impl/siliconCloud_model_provider/credential/reranker.py:44 #: models_provider/impl/siliconCloud_model_provider/credential/stt.py:39 #: models_provider/impl/siliconCloud_model_provider/credential/tti.py:80 #: models_provider/impl/siliconCloud_model_provider/credential/tts.py:40 #: models_provider/impl/tencent_cloud_model_provider/credential/llm.py:66 #: models_provider/impl/tencent_model_provider/credential/embedding.py:30 #: models_provider/impl/tencent_model_provider/credential/image.py:47 #: models_provider/impl/tencent_model_provider/credential/llm.py:57 #: models_provider/impl/tencent_model_provider/credential/tti.py:104 #: models_provider/impl/vllm_model_provider/credential/embedding.py:43 #: models_provider/impl/vllm_model_provider/credential/image.py:42 #: models_provider/impl/vllm_model_provider/credential/llm.py:55 #: models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:43 #: models_provider/impl/volcanic_engine_model_provider/credential/image.py:42 #: models_provider/impl/volcanic_engine_model_provider/credential/llm.py:66 #: models_provider/impl/volcanic_engine_model_provider/credential/stt.py:42 #: models_provider/impl/volcanic_engine_model_provider/credential/tti.py:58 #: models_provider/impl/volcanic_engine_model_provider/credential/tts.py:68 #: models_provider/impl/wenxin_model_provider/credential/embedding.py:38 #: models_provider/impl/xf_model_provider/credential/embedding.py:38 #: models_provider/impl/xf_model_provider/credential/image.py:50 #: 
models_provider/impl/xf_model_provider/credential/llm.py:84 #: models_provider/impl/xf_model_provider/credential/stt.py:41 #: models_provider/impl/xf_model_provider/credential/tts.py:65 #: models_provider/impl/xinference_model_provider/credential/image.py:41 #: models_provider/impl/xinference_model_provider/credential/reranker.py:40 #: models_provider/impl/xinference_model_provider/credential/stt.py:37 #: models_provider/impl/xinference_model_provider/credential/tti.py:77 #: models_provider/impl/xinference_model_provider/credential/tts.py:56 #: models_provider/impl/zhipu_model_provider/credential/image.py:41 #: models_provider/impl/zhipu_model_provider/credential/llm.py:64 #: models_provider/impl/zhipu_model_provider/credential/tti.py:59 #, python-brace-format msgid "" "Verification failed, please check whether the parameters are correct: {error}" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:17 #: models_provider/impl/anthropic_model_provider/credential/llm.py:22 #: models_provider/impl/aws_bedrock_model_provider/credential/llm.py:14 #: models_provider/impl/azure_model_provider/credential/llm.py:23 #: models_provider/impl/deepseek_model_provider/credential/llm.py:22 #: models_provider/impl/gemini_model_provider/credential/llm.py:22 #: models_provider/impl/kimi_model_provider/credential/llm.py:22 #: models_provider/impl/ollama_model_provider/credential/llm.py:20 #: models_provider/impl/openai_model_provider/credential/llm.py:23 #: models_provider/impl/siliconCloud_model_provider/credential/llm.py:22 #: models_provider/impl/tencent_cloud_model_provider/credential/llm.py:22 #: models_provider/impl/tencent_model_provider/credential/llm.py:14 #: models_provider/impl/vllm_model_provider/credential/llm.py:15 #: models_provider/impl/volcanic_engine_model_provider/credential/llm.py:22 #: models_provider/impl/wenxin_model_provider/credential/llm.py:22 #: models_provider/impl/xf_model_provider/credential/llm.py:22 #: models_provider/impl/xf_model_provider/credential/llm.py:41 #: models_provider/impl/xinference_model_provider/credential/llm.py:15 #: models_provider/impl/zhipu_model_provider/credential/llm.py:22 msgid "Temperature" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:18 #: models_provider/impl/anthropic_model_provider/credential/llm.py:23 #: models_provider/impl/aws_bedrock_model_provider/credential/llm.py:15 #: models_provider/impl/azure_model_provider/credential/llm.py:24 #: models_provider/impl/deepseek_model_provider/credential/llm.py:23 #: models_provider/impl/gemini_model_provider/credential/llm.py:23 #: models_provider/impl/kimi_model_provider/credential/llm.py:23 #: models_provider/impl/ollama_model_provider/credential/llm.py:21 #: models_provider/impl/openai_model_provider/credential/llm.py:24 #: models_provider/impl/siliconCloud_model_provider/credential/llm.py:23 #: models_provider/impl/tencent_cloud_model_provider/credential/llm.py:23 #: models_provider/impl/tencent_model_provider/credential/llm.py:15 #: models_provider/impl/vllm_model_provider/credential/llm.py:16 #: models_provider/impl/volcanic_engine_model_provider/credential/llm.py:23 #: models_provider/impl/wenxin_model_provider/credential/llm.py:23 #: models_provider/impl/xf_model_provider/credential/llm.py:23 #: models_provider/impl/xf_model_provider/credential/llm.py:42 #: models_provider/impl/xinference_model_provider/credential/llm.py:16 #: models_provider/impl/zhipu_model_provider/credential/llm.py:23 msgid "" "Higher values make the output more 
random, while lower values make it more " "focused and deterministic" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:30 #: models_provider/impl/anthropic_model_provider/credential/llm.py:31 #: models_provider/impl/aws_bedrock_model_provider/credential/llm.py:23 #: models_provider/impl/azure_model_provider/credential/llm.py:32 #: models_provider/impl/azure_model_provider/credential/llm.py:43 #: models_provider/impl/deepseek_model_provider/credential/llm.py:31 #: models_provider/impl/gemini_model_provider/credential/llm.py:31 #: models_provider/impl/kimi_model_provider/credential/llm.py:31 #: models_provider/impl/ollama_model_provider/credential/llm.py:29 #: models_provider/impl/openai_model_provider/credential/llm.py:32 #: models_provider/impl/siliconCloud_model_provider/credential/llm.py:31 #: models_provider/impl/tencent_cloud_model_provider/credential/llm.py:31 #: models_provider/impl/vllm_model_provider/credential/llm.py:24 #: models_provider/impl/volcanic_engine_model_provider/credential/llm.py:31 #: models_provider/impl/wenxin_model_provider/credential/llm.py:31 #: models_provider/impl/xf_model_provider/credential/llm.py:31 #: models_provider/impl/xf_model_provider/credential/llm.py:50 #: models_provider/impl/xinference_model_provider/credential/llm.py:24 #: models_provider/impl/zhipu_model_provider/credential/llm.py:31 msgid "Output the maximum Tokens" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:31 msgid "Specify the maximum number of tokens that the model can generate." msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:44 #: models_provider/impl/anthropic_model_provider/credential/image.py:15 #: models_provider/impl/anthropic_model_provider/credential/llm.py:74 msgid "API URL" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:45 #: models_provider/impl/anthropic_model_provider/credential/image.py:16 #: models_provider/impl/anthropic_model_provider/credential/llm.py:75 msgid "API Key" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:20 #: models_provider/impl/azure_model_provider/credential/tti.py:15 #: models_provider/impl/openai_model_provider/credential/tti.py:15 #: models_provider/impl/siliconCloud_model_provider/credential/tti.py:15 #: models_provider/impl/volcanic_engine_model_provider/credential/tti.py:15 #: models_provider/impl/xinference_model_provider/credential/tti.py:14 #: models_provider/impl/zhipu_model_provider/credential/tti.py:15 msgid "Image size" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:20 #: models_provider/impl/azure_model_provider/credential/tti.py:15 msgid "Specify the size of the generated image, such as: 1024x1024" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:34 #: models_provider/impl/azure_model_provider/credential/tti.py:40 #: models_provider/impl/openai_model_provider/credential/tti.py:43 #: models_provider/impl/siliconCloud_model_provider/credential/tti.py:43 #: models_provider/impl/xinference_model_provider/credential/tti.py:41 msgid "Number of pictures" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:34 #: models_provider/impl/azure_model_provider/credential/tti.py:40 msgid "Specify the number of generated images" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:44 msgid "Style" msgstr "" #: 
models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:44 msgid "Specify the style of generated images" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:48 msgid "Default value, the image style is randomly output by the model" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:49 msgid "photography" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:50 msgid "Portraits" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:51 msgid "3D cartoon" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:52 msgid "animation" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:53 msgid "painting" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:54 msgid "watercolor" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:55 msgid "sketch" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:56 msgid "Chinese painting" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:57 msgid "flat illustration" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:20 msgid "Timbre" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:20 #: models_provider/impl/volcanic_engine_model_provider/credential/tts.py:15 msgid "Chinese sounds can support mixed scenes of Chinese and English" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:26 msgid "Long Xiaochun" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:27 msgid "Long Xiaoxia" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:28 msgid "Long Xiaochen" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:29 msgid "Long Xiaobai" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:30 msgid "Long Laotie" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:31 msgid "Long Shu" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:32 msgid "Long Shuo" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:33 msgid "Long Jing" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:34 msgid "Long Miao" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:35 msgid "Long Yue" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:36 msgid "Long Yuan" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:37 msgid "Long Fei" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:38 msgid "Long Jielidou" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:39 msgid "Long Tong" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:40 msgid "Long Xiang" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:47 msgid "Speaking speed" msgstr "" #: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:47 msgid "[0.5, 2], the default is 1, usually one decimal place is enough" msgstr "" #: models_provider/impl/anthropic_model_provider/credential/image.py:28 #: 
models_provider/impl/anthropic_model_provider/credential/llm.py:52 #: models_provider/impl/azure_model_provider/credential/embedding.py:32 #: models_provider/impl/azure_model_provider/credential/image.py:35 #: models_provider/impl/azure_model_provider/credential/llm.py:64 #: models_provider/impl/azure_model_provider/credential/stt.py:28 #: models_provider/impl/azure_model_provider/credential/tti.py:63 #: models_provider/impl/azure_model_provider/credential/tts.py:46 #: models_provider/impl/deepseek_model_provider/credential/llm.py:52 #: models_provider/impl/gemini_model_provider/credential/embedding.py:31 #: models_provider/impl/gemini_model_provider/credential/image.py:27 #: models_provider/impl/gemini_model_provider/credential/llm.py:52 #: models_provider/impl/gemini_model_provider/credential/stt.py:26 #: models_provider/impl/kimi_model_provider/credential/llm.py:52 #: models_provider/impl/local_model_provider/credential/embedding.py:31 #: models_provider/impl/local_model_provider/credential/reranker.py:32 #: models_provider/impl/ollama_model_provider/credential/embedding.py:46 #: models_provider/impl/ollama_model_provider/credential/llm.py:62 #: models_provider/impl/ollama_model_provider/credential/reranker.py:63 #: models_provider/impl/openai_model_provider/credential/embedding.py:31 #: models_provider/impl/openai_model_provider/credential/image.py:30 #: models_provider/impl/openai_model_provider/credential/llm.py:53 #: models_provider/impl/openai_model_provider/credential/stt.py:27 #: models_provider/impl/openai_model_provider/credential/tti.py:66 #: models_provider/impl/openai_model_provider/credential/tts.py:45 #: models_provider/impl/siliconCloud_model_provider/credential/embedding.py:31 #: models_provider/impl/siliconCloud_model_provider/credential/image.py:30 #: models_provider/impl/siliconCloud_model_provider/credential/llm.py:52 #: models_provider/impl/siliconCloud_model_provider/credential/reranker.py:32 #: models_provider/impl/siliconCloud_model_provider/credential/stt.py:27 #: models_provider/impl/siliconCloud_model_provider/credential/tti.py:66 #: models_provider/impl/siliconCloud_model_provider/credential/tts.py:27 #: models_provider/impl/tencent_cloud_model_provider/credential/llm.py:52 #: models_provider/impl/tencent_model_provider/credential/image.py:32 #: models_provider/impl/vllm_model_provider/credential/embedding.py:31 #: models_provider/impl/vllm_model_provider/credential/image.py:27 #: models_provider/impl/vllm_model_provider/credential/llm.py:65 #: models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:31 #: models_provider/impl/volcanic_engine_model_provider/credential/image.py:27 #: models_provider/impl/volcanic_engine_model_provider/credential/llm.py:52 #: models_provider/impl/volcanic_engine_model_provider/credential/stt.py:30 #: models_provider/impl/volcanic_engine_model_provider/credential/tti.py:46 #: models_provider/impl/volcanic_engine_model_provider/credential/tts.py:56 #: models_provider/impl/wenxin_model_provider/credential/llm.py:55 #: models_provider/impl/wenxin_model_provider/credential/llm.py:72 #: models_provider/impl/xf_model_provider/credential/image.py:34 #: models_provider/impl/xf_model_provider/credential/llm.py:71 #: models_provider/impl/xf_model_provider/credential/stt.py:29 #: models_provider/impl/xf_model_provider/credential/tts.py:52 #: models_provider/impl/xinference_model_provider/credential/embedding.py:40 #: models_provider/impl/xinference_model_provider/credential/image.py:27 #: 
models_provider/impl/xinference_model_provider/credential/llm.py:59 #: models_provider/impl/xinference_model_provider/credential/reranker.py:29 #: models_provider/impl/xinference_model_provider/credential/stt.py:26 #: models_provider/impl/xinference_model_provider/credential/tti.py:64 #: models_provider/impl/xinference_model_provider/credential/tts.py:44 #: models_provider/impl/zhipu_model_provider/credential/image.py:26 #: models_provider/impl/zhipu_model_provider/credential/llm.py:51 #: models_provider/impl/zhipu_model_provider/credential/tti.py:45 #, python-brace-format msgid "{key} is required" msgstr "" #: models_provider/impl/anthropic_model_provider/credential/llm.py:32 #: models_provider/impl/aws_bedrock_model_provider/credential/llm.py:24 #: models_provider/impl/azure_model_provider/credential/llm.py:33 #: models_provider/impl/azure_model_provider/credential/llm.py:44 #: models_provider/impl/deepseek_model_provider/credential/llm.py:32 #: models_provider/impl/gemini_model_provider/credential/llm.py:32 #: models_provider/impl/kimi_model_provider/credential/llm.py:32 #: models_provider/impl/ollama_model_provider/credential/llm.py:30 #: models_provider/impl/openai_model_provider/credential/llm.py:33 #: models_provider/impl/siliconCloud_model_provider/credential/llm.py:32 #: models_provider/impl/tencent_cloud_model_provider/credential/llm.py:32 #: models_provider/impl/vllm_model_provider/credential/llm.py:25 #: models_provider/impl/volcanic_engine_model_provider/credential/llm.py:32 #: models_provider/impl/wenxin_model_provider/credential/llm.py:32 #: models_provider/impl/xf_model_provider/credential/llm.py:32 #: models_provider/impl/xf_model_provider/credential/llm.py:51 #: models_provider/impl/xinference_model_provider/credential/llm.py:25 #: models_provider/impl/zhipu_model_provider/credential/llm.py:32 msgid "Specify the maximum number of tokens that the model can generate" msgstr "" #: models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:36 msgid "" "An update to Claude 2 that doubles the context window and improves " "reliability, hallucination rates, and evidence-based accuracy in long " "documents and RAG contexts." msgstr "" #: models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:43 msgid "" "Anthropic is a powerful model that can handle a variety of tasks, from " "complex dialogue and creative content generation to detailed command " "obedience." msgstr "" #: models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:50 msgid "" "The Claude 3 Haiku is Anthropic's fastest and most compact model, with near-" "instant responsiveness. The model can answer simple queries and requests " "quickly. Customers will be able to build seamless AI experiences that mimic " "human interactions. Claude 3 Haiku can process images and return text " "output, and provides 200K context windows." msgstr "" #: models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:57 msgid "" "The Claude 3 Sonnet model from Anthropic strikes the ideal balance between " "intelligence and speed, especially when it comes to handling enterprise " "workloads. This model offers maximum utility while being priced lower than " "competing products, and it's been engineered to be a solid choice for " "deploying AI at scale." 
msgstr "" #: models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:64 msgid "" "The Claude 3.5 Sonnet raises the industry standard for intelligence, " "outperforming competing models and the Claude 3 Opus in extensive " "evaluations, with the speed and cost-effectiveness of our mid-range models." msgstr "" #: models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:71 msgid "" "A faster, more affordable but still very powerful model that can handle a " "range of tasks including casual conversation, text analysis, summarization " "and document question answering." msgstr "" #: models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:78 msgid "" "Titan Text Premier is the most powerful and advanced model in the Titan Text " "series, designed to deliver exceptional performance for a variety of " "enterprise applications. With its cutting-edge features, it delivers greater " "accuracy and outstanding results, making it an excellent choice for " "organizations looking for a top-notch text processing solution." msgstr "" #: models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:85 msgid "" "Amazon Titan Text Lite is a lightweight, efficient model ideal for fine-" "tuning English-language tasks, including summarization and copywriting, " "where customers require smaller, more cost-effective, and highly " "customizable models." msgstr "" #: models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:91 msgid "" "Amazon Titan Text Express has context lengths of up to 8,000 tokens, making " "it ideal for a variety of high-level general language tasks, such as open-" "ended text generation and conversational chat, as well as support in " "retrieval-augmented generation (RAG). At launch, the model is optimized for " "English, but other languages are supported." msgstr "" #: models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:97 msgid "" "7B dense converter for rapid deployment and easy customization. Small in " "size yet powerful in a variety of use cases. Supports English and code, as " "well as 32k context windows." msgstr "" #: models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:103 msgid "" "Advanced Mistral AI large-scale language model capable of handling any " "language task, including complex multilingual reasoning, text understanding, " "transformation, and code generation." msgstr "" #: models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:109 msgid "" "Ideal for content creation, conversational AI, language understanding, R&D, " "and enterprise applications" msgstr "" #: models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:115 msgid "" "Ideal for limited computing power and resources, edge devices, and faster " "training times." msgstr "" #: models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:123 msgid "" "Titan Embed Text is the largest embedding model in the Amazon Titan Embed " "series and can handle various text embedding tasks, such as text " "classification, text similarity calculation, etc." 
msgstr "" #: models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:28 #: models_provider/impl/aws_bedrock_model_provider/credential/llm.py:47 #, python-brace-format msgid "The following fields are required: {keys}" msgstr "" #: models_provider/impl/azure_model_provider/credential/embedding.py:44 #: models_provider/impl/azure_model_provider/credential/llm.py:76 msgid "Verification failed, please check whether the parameters are correct" msgstr "" #: models_provider/impl/azure_model_provider/credential/tti.py:28 #: models_provider/impl/openai_model_provider/credential/tti.py:29 #: models_provider/impl/siliconCloud_model_provider/credential/tti.py:29 #: models_provider/impl/xinference_model_provider/credential/tti.py:28 msgid "Picture quality" msgstr "" #: models_provider/impl/azure_model_provider/credential/tts.py:17 #: models_provider/impl/openai_model_provider/credential/tts.py:17 msgid "" "Try out the different sounds (Alloy, Echo, Fable, Onyx, Nova, and Sparkle) " "to find one that suits your desired tone and audience. The current voiceover " "is optimized for English." msgstr "" #: models_provider/impl/deepseek_model_provider/deepseek_model_provider.py:24 msgid "Good at common conversational tasks, supports 32K contexts" msgstr "" #: models_provider/impl/deepseek_model_provider/deepseek_model_provider.py:29 msgid "Good at handling programming tasks, supports 16K contexts" msgstr "" #: models_provider/impl/gemini_model_provider/gemini_model_provider.py:32 msgid "Latest Gemini 1.0 Pro model, updated with Google update" msgstr "" #: models_provider/impl/gemini_model_provider/gemini_model_provider.py:36 msgid "Latest Gemini 1.0 Pro Vision model, updated with Google update" msgstr "" #: models_provider/impl/gemini_model_provider/gemini_model_provider.py:43 #: models_provider/impl/gemini_model_provider/gemini_model_provider.py:47 #: models_provider/impl/gemini_model_provider/gemini_model_provider.py:54 #: models_provider/impl/gemini_model_provider/gemini_model_provider.py:58 msgid "Latest Gemini 1.5 Flash model, updated with Google updates" msgstr "" #: models_provider/impl/gemini_model_provider/model/stt.py:53 msgid "convert audio to text" msgstr "" #: models_provider/impl/local_model_provider/credential/embedding.py:53 #: models_provider/impl/local_model_provider/credential/reranker.py:54 msgid "Model catalog" msgstr "" #: models_provider/impl/local_model_provider/local_model_provider.py:39 msgid "local model" msgstr "" #: models_provider/impl/ollama_model_provider/credential/embedding.py:30 #: models_provider/impl/ollama_model_provider/credential/image.py:23 #: models_provider/impl/ollama_model_provider/credential/llm.py:48 #: models_provider/impl/ollama_model_provider/credential/reranker.py:35 #: models_provider/impl/vllm_model_provider/credential/llm.py:43 #: models_provider/impl/xinference_model_provider/credential/embedding.py:24 #: models_provider/impl/xinference_model_provider/credential/llm.py:44 msgid "API domain name is invalid" msgstr "" #: models_provider/impl/ollama_model_provider/credential/embedding.py:35 #: models_provider/impl/ollama_model_provider/credential/image.py:28 #: models_provider/impl/ollama_model_provider/credential/llm.py:53 #: models_provider/impl/ollama_model_provider/credential/reranker.py:40 #: models_provider/impl/vllm_model_provider/credential/llm.py:47 #: models_provider/impl/xinference_model_provider/credential/embedding.py:30 #: models_provider/impl/xinference_model_provider/credential/llm.py:48 msgid "The model does not exist, please 
download the model first" msgstr "" #: models_provider/impl/ollama_model_provider/ollama_model_provider.py:56 msgid "" "Llama 2 is a set of pretrained and fine-tuned generative text models ranging " "in size from 7 billion to 70 billion. This is a repository of 7B pretrained " "models. Links to other models can be found in the index at the bottom." msgstr "" #: models_provider/impl/ollama_model_provider/ollama_model_provider.py:60 msgid "" "Llama 2 is a set of pretrained and fine-tuned generative text models ranging " "in size from 7 billion to 70 billion. This is a repository of 13B pretrained " "models. Links to other models can be found in the index at the bottom." msgstr "" #: models_provider/impl/ollama_model_provider/ollama_model_provider.py:64 msgid "" "Llama 2 is a set of pretrained and fine-tuned generative text models ranging " "in size from 7 billion to 70 billion. This is a repository of 70B pretrained " "models. Links to other models can be found in the index at the bottom." msgstr "" #: models_provider/impl/ollama_model_provider/ollama_model_provider.py:68 msgid "" "Since the Chinese alignment of Llama2 itself is weak, we use the Chinese " "instruction set to fine-tune meta-llama/Llama-2-13b-chat-hf with LoRA so " "that it has strong Chinese conversation capabilities." msgstr "" #: models_provider/impl/ollama_model_provider/ollama_model_provider.py:72 msgid "" "Meta Llama 3: The most capable public product LLM to date. 8 billion " "parameters." msgstr "" #: models_provider/impl/ollama_model_provider/ollama_model_provider.py:76 msgid "" "Meta Llama 3: The most capable public product LLM to date. 70 billion " "parameters." msgstr "" #: models_provider/impl/ollama_model_provider/ollama_model_provider.py:80 msgid "" "Compared with previous versions, qwen 1.5 0.5b has significantly enhanced " "the model's alignment with human preferences and its multi-language " "processing capabilities. Models of all sizes support a context length of " "32768 tokens. 500 million parameters." msgstr "" #: models_provider/impl/ollama_model_provider/ollama_model_provider.py:84 msgid "" "Compared with previous versions, qwen 1.5 1.8b has significantly enhanced " "the model's alignment with human preferences and its multi-language " "processing capabilities. Models of all sizes support a context length of " "32768 tokens. 1.8 billion parameters." msgstr "" #: models_provider/impl/ollama_model_provider/ollama_model_provider.py:88 msgid "" "Compared with previous versions, qwen 1.5 4b has significantly enhanced the " "model's alignment with human preferences and its multi-language processing " "capabilities. Models of all sizes support a context length of 32768 tokens. " "4 billion parameters." msgstr "" #: models_provider/impl/ollama_model_provider/ollama_model_provider.py:93 msgid "" "Compared with previous versions, qwen 1.5 7b has significantly enhanced the " "model's alignment with human preferences and its multi-language processing " "capabilities. Models of all sizes support a context length of 32768 tokens. " "7 billion parameters." msgstr "" #: models_provider/impl/ollama_model_provider/ollama_model_provider.py:97 msgid "" "Compared with previous versions, qwen 1.5 14b has significantly enhanced the " "model's alignment with human preferences and its multi-language processing " "capabilities. Models of all sizes support a context length of 32768 tokens. " "14 billion parameters." 
msgstr "" #: models_provider/impl/ollama_model_provider/ollama_model_provider.py:101 msgid "" "Compared with previous versions, qwen 1.5 32b has significantly enhanced the " "model's alignment with human preferences and its multi-language processing " "capabilities. Models of all sizes support a context length of 32768 tokens. " "32 billion parameters." msgstr "" #: models_provider/impl/ollama_model_provider/ollama_model_provider.py:105 msgid "" "Compared with previous versions, qwen 1.5 72b has significantly enhanced the " "model's alignment with human preferences and its multi-language processing " "capabilities. Models of all sizes support a context length of 32768 tokens. " "72 billion parameters." msgstr "" #: models_provider/impl/ollama_model_provider/ollama_model_provider.py:109 msgid "" "Compared with previous versions, qwen 1.5 110b has significantly enhanced " "the model's alignment with human preferences and its multi-language " "processing capabilities. Models of all sizes support a context length of " "32768 tokens. 110 billion parameters." msgstr "" #: models_provider/impl/ollama_model_provider/ollama_model_provider.py:153 #: models_provider/impl/ollama_model_provider/ollama_model_provider.py:193 msgid "" "Phi-3 Mini is Microsoft's 3.8B parameter, lightweight, state-of-the-art open " "model." msgstr "" #: models_provider/impl/ollama_model_provider/ollama_model_provider.py:162 #: models_provider/impl/ollama_model_provider/ollama_model_provider.py:197 msgid "" "A high-performance open embedding model with a large token context window." msgstr "" #: models_provider/impl/openai_model_provider/credential/tti.py:16 #: models_provider/impl/siliconCloud_model_provider/credential/tti.py:16 msgid "" "The image generation endpoint allows you to create raw images based on text " "prompts. When using the DALL·E 3, the image size can be 1024x1024, 1024x1792 " "or 1792x1024 pixels." msgstr "" #: models_provider/impl/openai_model_provider/credential/tti.py:29 #: models_provider/impl/siliconCloud_model_provider/credential/tti.py:29 msgid "" " \n" "By default, images are produced in standard quality, but with DALL·E 3 you " "can set quality: \"hd\" to enhance detail. Square, standard quality images " "are generated fastest.\n" " " msgstr "" #: models_provider/impl/openai_model_provider/credential/tti.py:44 #: models_provider/impl/siliconCloud_model_provider/credential/tti.py:44 msgid "" "You can use DALL·E 3 to request 1 image at a time (requesting more images by " "issuing parallel requests), or use DALL·E 2 with the n parameter to request " "up to 10 images at a time." 
msgstr "" #: models_provider/impl/openai_model_provider/openai_model_provider.py:35 #: models_provider/impl/openai_model_provider/openai_model_provider.py:119 #: models_provider/impl/siliconCloud_model_provider/siliconCloud_model_provider.py:118 msgid "The latest gpt-3.5-turbo, updated with OpenAI adjustments" msgstr "" #: models_provider/impl/openai_model_provider/openai_model_provider.py:38 msgid "Latest gpt-4, updated with OpenAI adjustments" msgstr "" #: models_provider/impl/openai_model_provider/openai_model_provider.py:40 #: models_provider/impl/openai_model_provider/openai_model_provider.py:99 msgid "" "The latest GPT-4o, cheaper and faster than gpt-4-turbo, updated with OpenAI " "adjustments" msgstr "" #: models_provider/impl/openai_model_provider/openai_model_provider.py:43 #: models_provider/impl/openai_model_provider/openai_model_provider.py:102 msgid "" "The latest gpt-4o-mini, cheaper and faster than gpt-4o, updated with OpenAI " "adjustments" msgstr "" #: models_provider/impl/openai_model_provider/openai_model_provider.py:46 msgid "The latest gpt-4-turbo, updated with OpenAI adjustments" msgstr "" #: models_provider/impl/openai_model_provider/openai_model_provider.py:49 msgid "The latest gpt-4-turbo-preview, updated with OpenAI adjustments" msgstr "" #: models_provider/impl/openai_model_provider/openai_model_provider.py:53 msgid "" "gpt-3.5-turbo snapshot on January 25, 2024, supporting context length 16,385 " "tokens" msgstr "" #: models_provider/impl/openai_model_provider/openai_model_provider.py:57 msgid "" "gpt-3.5-turbo snapshot on November 6, 2023, supporting context length 16,385 " "tokens" msgstr "" #: models_provider/impl/openai_model_provider/openai_model_provider.py:61 msgid "" "[Legacy] gpt-3.5-turbo snapshot on June 13, 2023, will be deprecated on June " "13, 2024" msgstr "" #: models_provider/impl/openai_model_provider/openai_model_provider.py:65 msgid "" "gpt-4o snapshot on May 13, 2024, supporting context length 128,000 tokens" msgstr "" #: models_provider/impl/openai_model_provider/openai_model_provider.py:69 msgid "" "gpt-4-turbo snapshot on April 9, 2024, supporting context length 128,000 " "tokens" msgstr "" #: models_provider/impl/openai_model_provider/openai_model_provider.py:72 msgid "" "gpt-4-turbo snapshot on January 25, 2024, supporting context length 128,000 " "tokens" msgstr "" #: models_provider/impl/openai_model_provider/openai_model_provider.py:75 msgid "" "gpt-4-turbo snapshot on November 6, 2023, supporting context length 128,000 " "tokens" msgstr "" #: models_provider/impl/tencent_cloud_model_provider/tencent_cloud_model_provider.py:58 msgid "Tencent Cloud" msgstr "" #: models_provider/impl/tencent_model_provider/credential/llm.py:41 #: models_provider/impl/tencent_model_provider/credential/tti.py:88 #, python-brace-format msgid "{keys} is required" msgstr "" #: models_provider/impl/tencent_model_provider/credential/tti.py:14 msgid "painting style" msgstr "" #: models_provider/impl/tencent_model_provider/credential/tti.py:14 msgid "If not passed, the default value is 201 (Japanese anime style)" msgstr "" #: models_provider/impl/tencent_model_provider/credential/tti.py:18 msgid "Not limited to style" msgstr "" #: models_provider/impl/tencent_model_provider/credential/tti.py:19 msgid "ink painting" msgstr "" #: models_provider/impl/tencent_model_provider/credential/tti.py:20 msgid "concept art" msgstr "" #: models_provider/impl/tencent_model_provider/credential/tti.py:21 msgid "Oil painting 1" msgstr "" #: 
models_provider/impl/tencent_model_provider/credential/tti.py:22 msgid "Oil Painting 2 (Van Gogh)" msgstr "" #: models_provider/impl/tencent_model_provider/credential/tti.py:23 msgid "watercolor painting" msgstr "" #: models_provider/impl/tencent_model_provider/credential/tti.py:24 msgid "pixel art" msgstr "" #: models_provider/impl/tencent_model_provider/credential/tti.py:25 msgid "impasto style" msgstr "" #: models_provider/impl/tencent_model_provider/credential/tti.py:26 msgid "illustration" msgstr "" #: models_provider/impl/tencent_model_provider/credential/tti.py:27 msgid "paper cut style" msgstr "" #: models_provider/impl/tencent_model_provider/credential/tti.py:28 msgid "Impressionism 1 (Monet)" msgstr "" #: models_provider/impl/tencent_model_provider/credential/tti.py:29 msgid "Impressionism 2" msgstr "" #: models_provider/impl/tencent_model_provider/credential/tti.py:31 msgid "classical portraiture" msgstr "" #: models_provider/impl/tencent_model_provider/credential/tti.py:32 msgid "black and white sketch" msgstr "" #: models_provider/impl/tencent_model_provider/credential/tti.py:33 msgid "cyberpunk" msgstr "" #: models_provider/impl/tencent_model_provider/credential/tti.py:34 msgid "science fiction style" msgstr "" #: models_provider/impl/tencent_model_provider/credential/tti.py:35 msgid "dark style" msgstr "" #: models_provider/impl/tencent_model_provider/credential/tti.py:37 msgid "vaporwave" msgstr "" #: models_provider/impl/tencent_model_provider/credential/tti.py:38 msgid "Japanese animation" msgstr "" #: models_provider/impl/tencent_model_provider/credential/tti.py:39 msgid "monster style" msgstr "" #: models_provider/impl/tencent_model_provider/credential/tti.py:40 msgid "Beautiful ancient style" msgstr "" #: models_provider/impl/tencent_model_provider/credential/tti.py:41 msgid "retro anime" msgstr "" #: models_provider/impl/tencent_model_provider/credential/tti.py:42 msgid "Game cartoon hand drawing" msgstr "" #: models_provider/impl/tencent_model_provider/credential/tti.py:43 msgid "Universal realistic style" msgstr "" #: models_provider/impl/tencent_model_provider/credential/tti.py:50 msgid "Generated image resolution" msgstr "" #: models_provider/impl/tencent_model_provider/credential/tti.py:50 msgid "If not passed, the default value is 768:768." msgstr "" #: models_provider/impl/tencent_model_provider/tencent_model_provider.py:38 msgid "" "The strongest version of the current Hunyuan model: a trillion-parameter " "MoE-32K long-text model. It reaches a clearly leading level on various " "benchmarks, handles complex instructions and reasoning as well as complex " "mathematics, supports function call, and is specifically optimized for " "fields such as multi-language translation, finance, law, and medical care" msgstr "" #: models_provider/impl/tencent_model_provider/tencent_model_provider.py:45 msgid "" "A better routing strategy is adopted to alleviate the problems of load " "balancing and expert convergence at the same time. For long texts, the " "needle-in-a-haystack score reaches 99.9%" msgstr "" #: models_provider/impl/tencent_model_provider/tencent_model_provider.py:51 msgid "" "Upgraded to an MoE structure with a 256k context window, leading many open " "source models on multiple evaluation sets covering NLP, code, mathematics, " "industry tasks, etc."
msgstr "" #: models_provider/impl/tencent_model_provider/tencent_model_provider.py:57 msgid "" "Hunyuan's latest role-playing model, officially fine-tuned and released by " "the Hunyuan team. It is built on the Hunyuan model with additional training " "on role-playing scenario datasets, and delivers better baseline results in " "role-playing scenarios." msgstr "" #: models_provider/impl/tencent_model_provider/tencent_model_provider.py:63 msgid "" "Hunyuan's latest MoE-architecture FunctionCall model, trained with " "high-quality FunctionCall data, has a context window of 32K and leads on " "evaluation metrics across multiple dimensions." msgstr "" #: models_provider/impl/tencent_model_provider/tencent_model_provider.py:69 msgid "" "Hunyuan's latest code generation model. After training the base model on " "200B of high-quality code data and iterating on high-quality SFT data for " "half a year, the long-context window has been increased to 8K. It ranks " "among the top on automatic code generation benchmarks for the five major " "languages, and in manual high-quality evaluations covering 10 comprehensive " "code tasks in those languages its performance is in the first tier." msgstr "" #: models_provider/impl/tencent_model_provider/tencent_model_provider.py:77 msgid "" "Tencent's Hunyuan Embedding interface can convert text into high-quality " "vector data. The vectors have 1024 dimensions." msgstr "" #: models_provider/impl/tencent_model_provider/tencent_model_provider.py:87 msgid "Hunyuan vision model" msgstr "" #: models_provider/impl/tencent_model_provider/tencent_model_provider.py:94 msgid "Hunyuan image generation model" msgstr "" #: models_provider/impl/tencent_model_provider/tencent_model_provider.py:125 msgid "Tencent Hunyuan" msgstr "" #: models_provider/impl/vllm_model_provider/vllm_model_provider.py:24 #: models_provider/impl/vllm_model_provider/vllm_model_provider.py:42 msgid "Facebook’s 125M parameter model" msgstr "" #: models_provider/impl/vllm_model_provider/vllm_model_provider.py:25 msgid "BAAI’s 7B parameter model" msgstr "" #: models_provider/impl/vllm_model_provider/vllm_model_provider.py:26 msgid "BAAI’s 13B parameter model" msgstr "" #: models_provider/impl/volcanic_engine_model_provider/credential/tti.py:16 msgid "" "If the width and height deviate too much from 512, image quality will be " "poor and the probability of excessive latency will increase significantly. " "Recommended aspect ratios and the corresponding width and height before " "super-resolution: width*height" msgstr "" #: models_provider/impl/volcanic_engine_model_provider/credential/tts.py:15 #: models_provider/impl/xinference_model_provider/credential/tts.py:15 msgid "timbre" msgstr "" #: models_provider/impl/volcanic_engine_model_provider/credential/tts.py:31 #: models_provider/impl/xf_model_provider/credential/tts.py:28 msgid "speaking speed" msgstr "" #: models_provider/impl/volcanic_engine_model_provider/credential/tts.py:31 msgid "[0.2,3], the default is 1, usually one decimal place is enough" msgstr "" #: models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:39 #: models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:44 #: models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:88 msgid "" "The user goes to the model inference page of Volcano Ark to create an " "inference access point. 
Here, you need to enter ep-xxxxxxxxxx-yyyy to call " "it." msgstr "" #: models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:59 msgid "General 2.0 text-to-image" msgstr "" #: models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:64 msgid "General 2.0 Pro text-to-image" msgstr "" #: models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:69 msgid "General 1.4 text-to-image" msgstr "" #: models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:74 msgid "Anime 1.3.0 text-to-image" msgstr "" #: models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:79 msgid "Anime 1.3.1 text-to-image" msgstr "" #: models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:113 msgid "Volcano Engine" msgstr "" #: models_provider/impl/wenxin_model_provider/credential/llm.py:51 #, python-brace-format msgid "The {model_name} model is not supported" msgstr "" #: models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:24 #: models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:53 msgid "" "ERNIE-Bot-4 is a large language model independently developed by Baidu. It " "covers massive amounts of Chinese data and has stronger capabilities in " "dialogue Q&A, content creation and generation." msgstr "" #: models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:27 msgid "" "ERNIE-Bot is a large language model independently developed by Baidu. It " "covers massive amounts of Chinese data and has stronger capabilities in " "dialogue Q&A, content creation and generation." msgstr "" #: models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:30 msgid "" "ERNIE-Bot-turbo is a large language model independently developed by Baidu. " "It covers massive amounts of Chinese data, has stronger capabilities in " "dialogue Q&A, content creation and generation, and has a faster response " "speed." msgstr "" #: models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:33 msgid "" "BLOOMZ-7B is a well-known large language model in the industry. It was " "developed and open sourced by BigScience and can output text in 46 languages " "and 13 programming languages." msgstr "" #: models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:39 msgid "" "Llama-2-13b-chat was developed by Meta AI and is open source. It performs " "well in scenarios such as coding, reasoning and knowledge application. " "Llama-2-13b-chat is a native open source version with balanced performance " "and effect, suitable for conversation scenarios." msgstr "" #: models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:42 msgid "" "Llama-2-70b-chat was developed by Meta AI and is open source. It performs " "well in scenarios such as coding, reasoning, and knowledge application. " "Llama-2-70b-chat is a native open source version with high-precision effects." msgstr "" #: models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:45 msgid "" "The Chinese enhanced version developed by the Qianfan team based on " "Llama-2-7b performs well on Chinese benchmarks such as CMMLU and C-EVAL." msgstr "" #: models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:49 msgid "" "Embedding-V1 is a text representation model based on Baidu Wenxin large " "model technology. 
It can convert text into a vector form represented by " "numerical values and can be used in text retrieval, information " "recommendation, knowledge mining and other scenarios. Embedding-V1 provides " "the Embeddings interface, which can generate corresponding vector " "representations based on input content. You can call this interface to input " "text into the model and obtain the corresponding vector representation for " "subsequent text processing and analysis." msgstr "" #: models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:66 msgid "Qianfan large model" msgstr "" #: models_provider/impl/xf_model_provider/credential/image.py:42 msgid "Please outline this picture" msgstr "" #: models_provider/impl/xf_model_provider/credential/tts.py:15 msgid "Speaker" msgstr "" #: models_provider/impl/xf_model_provider/credential/tts.py:16 msgid "" "Speaker, optional values: please go to the console to add a trial speaker or " "purchase one. After adding it, the speaker parameter value will be " "displayed." msgstr "" #: models_provider/impl/xf_model_provider/credential/tts.py:21 msgid "iFlytek Xiaoyan" msgstr "" #: models_provider/impl/xf_model_provider/credential/tts.py:22 msgid "iFlytek Xujiu" msgstr "" #: models_provider/impl/xf_model_provider/credential/tts.py:23 msgid "iFlytek Xiaoping" msgstr "" #: models_provider/impl/xf_model_provider/credential/tts.py:24 msgid "iFlytek Xiaojing" msgstr "" #: models_provider/impl/xf_model_provider/credential/tts.py:25 msgid "iFlytek Xuxiaobao" msgstr "" #: models_provider/impl/xf_model_provider/credential/tts.py:28 msgid "Speech speed, optional value: [0-100], default is 50" msgstr "" #: models_provider/impl/xf_model_provider/xf_model_provider.py:39 #: models_provider/impl/xf_model_provider/xf_model_provider.py:50 msgid "Chinese and English recognition" msgstr "" #: models_provider/impl/xf_model_provider/xf_model_provider.py:66 msgid "iFlytek Spark" msgstr "" #: models_provider/impl/xinference_model_provider/credential/tti.py:15 msgid "" "The image generation endpoint allows you to create raw images based on text " "prompts. The dimensions of the image can be 1024x1024, 1024x1792, or " "1792x1024 pixels." msgstr "" #: models_provider/impl/xinference_model_provider/credential/tti.py:29 msgid "" "By default, images are generated in standard quality; you can set quality: " "\"hd\" to enhance detail. Square, standard quality images are generated " "fastest." msgstr "" #: models_provider/impl/xinference_model_provider/credential/tti.py:42 msgid "" "You can request 1 image at a time (requesting more images by making parallel " "requests), or up to 10 images at a time using the n parameter."
msgstr "" #: models_provider/impl/xinference_model_provider/credential/tts.py:20 msgid "Chinese female" msgstr "" #: models_provider/impl/xinference_model_provider/credential/tts.py:21 msgid "Chinese male" msgstr "" #: models_provider/impl/xinference_model_provider/credential/tts.py:22 msgid "Japanese male" msgstr "" #: models_provider/impl/xinference_model_provider/credential/tts.py:23 msgid "Cantonese female" msgstr "" #: models_provider/impl/xinference_model_provider/credential/tts.py:24 msgid "English female" msgstr "" #: models_provider/impl/xinference_model_provider/credential/tts.py:25 msgid "English male" msgstr "" #: models_provider/impl/xinference_model_provider/credential/tts.py:26 msgid "Korean female" msgstr "" #: models_provider/impl/xinference_model_provider/xinference_model_provider.py:37 msgid "" "Code Llama is a language model specifically designed for code generation." msgstr "" #: models_provider/impl/xinference_model_provider/xinference_model_provider.py:44 msgid "" " \n" "Code Llama Instruct is a fine-tuned version of Code Llama's instructions, " "designed to perform specific tasks.\n" " " msgstr "" #: models_provider/impl/xinference_model_provider/xinference_model_provider.py:53 msgid "" "Code Llama Python is a language model specifically designed for Python code " "generation." msgstr "" #: models_provider/impl/xinference_model_provider/xinference_model_provider.py:60 msgid "" "CodeQwen 1.5 is a language model for code generation with high performance." msgstr "" #: models_provider/impl/xinference_model_provider/xinference_model_provider.py:67 msgid "CodeQwen 1.5 Chat is a chat model version of CodeQwen 1.5." msgstr "" #: models_provider/impl/xinference_model_provider/xinference_model_provider.py:74 msgid "Deepseek is a large-scale language model with 13 billion parameters." msgstr "" #: models_provider/impl/zhipu_model_provider/credential/tti.py:16 msgid "" "Image size, only cogview-3-plus supports this parameter. Optional range: " "[1024x1024,768x1344,864x1152,1344x768,1152x864,1440x720,720x1440], the " "default is 1024x1024." msgstr "" #: models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:34 msgid "" "Have strong multi-modal understanding capabilities. Able to understand up to " "five images simultaneously and supports video content understanding" msgstr "" #: models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:37 msgid "" "Focus on single picture understanding. Suitable for scenarios requiring " "efficient image analysis" msgstr "" #: models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:40 msgid "" "Focus on single picture understanding. Suitable for scenarios requiring " "efficient image analysis (free)" msgstr "" #: models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:46 msgid "" "Quickly and accurately generate images based on user text descriptions. 
" "Resolution supports 1024x1024" msgstr "" #: models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:49 msgid "" "Generate high-quality images based on user text descriptions, supporting " "multiple image sizes" msgstr "" #: models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:52 msgid "" "Generate high-quality images based on user text descriptions, supporting " "multiple image sizes (free)" msgstr "" #: models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:75 msgid "zhipu AI" msgstr "" #: models_provider/serializers/model_serializer.py:43 #: models_provider/serializers/model_serializer.py:222 #: models_provider/serializers/model_serializer.py:259 #: models_provider/serializers/model_serializer.py:323 msgid "base model" msgstr "" #: models_provider/serializers/model_serializer.py:44 #: models_provider/serializers/model_serializer.py:260 msgid "parameter configuration" msgstr "" #: models_provider/serializers/model_serializer.py:45 #: models_provider/serializers/model_serializer.py:225 #: models_provider/serializers/model_serializer.py:261 msgid "certification information" msgstr "" #: models_provider/serializers/model_serializer.py:233 #: models_provider/serializers/model_serializer.py:272 #, python-brace-format msgid "base model【{model_name}】already exists" msgstr "" #: models_provider/serializers/model_serializer.py:312 msgid "Model saving failed" msgstr "" #: models_provider/serializers/model_serializer.py:325 msgid "create user" msgstr "" #: models_provider/views/model.py:28 models_provider/views/model.py:29 #: models_provider/views/model.py:30 msgid "Create model" msgstr "" #: models_provider/views/model.py:31 models_provider/views/model.py:59 #: models_provider/views/model.py:77 models_provider/views/model.py:90 #: models_provider/views/model.py:102 models_provider/views/model.py:117 #: models_provider/views/model.py:130 models_provider/views/model.py:148 #: models_provider/views/model.py:164 models_provider/views/provide.py:25 #: models_provider/views/provide.py:49 models_provider/views/provide.py:64 #: models_provider/views/provide.py:83 models_provider/views/provide.py:101 msgid "Model" msgstr "" #: models_provider/views/model.py:54 models_provider/views/model.py:55 #: models_provider/views/model.py:56 msgid "Query model list" msgstr "" #: models_provider/views/model.py:71 models_provider/views/model.py:72 #: models_provider/views/model.py:73 msgid "Update model" msgstr "" #: models_provider/views/model.py:85 models_provider/views/model.py:86 #: models_provider/views/model.py:87 msgid "Delete model" msgstr "" #: models_provider/views/model.py:97 models_provider/views/model.py:98 #: models_provider/views/model.py:99 msgid "Query model details" msgstr "" #: models_provider/views/model.py:112 models_provider/views/model.py:113 #: models_provider/views/model.py:114 msgid "Get model parameter form" msgstr "" #: models_provider/views/model.py:124 models_provider/views/model.py:125 #: models_provider/views/model.py:126 msgid "Save model parameter form" msgstr "" #: models_provider/views/model.py:141 models_provider/views/model.py:143 #: models_provider/views/model.py:145 msgid "" "Query model meta information, this interface does not carry authentication " "information" msgstr "" #: models_provider/views/model.py:158 models_provider/views/model.py:159 #: models_provider/views/model.py:160 msgid "Pause model download" msgstr "" #: models_provider/views/provide.py:21 models_provider/views/provide.py:22 #: models_provider/views/provide.py:23 msgid "Get 
a list of model suppliers" msgstr "" #: models_provider/views/provide.py:44 models_provider/views/provide.py:45 #: models_provider/views/provide.py:46 msgid "Get a list of model types" msgstr "" #: models_provider/views/provide.py:59 models_provider/views/provide.py:60 #: models_provider/views/provide.py:61 msgid "Example of obtaining model list" msgstr "" #: models_provider/views/provide.py:78 models_provider/views/provide.py:79 #: models_provider/views/provide.py:80 msgid "Get model default parameters" msgstr "" #: models_provider/views/provide.py:96 models_provider/views/provide.py:97 #: models_provider/views/provide.py:98 msgid "Get the model creation form" msgstr "" #: tools/serializers/tool.py:91 tools/serializers/tool.py:153 msgid "variable name" msgstr "" #: tools/serializers/tool.py:93 msgid "type" msgstr "" #: tools/serializers/tool.py:95 msgid "fields only support string|int|dict|array|float" msgstr "" #: tools/serializers/tool.py:99 msgid "The field only supports custom|reference" msgstr "" #: tools/serializers/tool.py:104 msgid "field name" msgstr "" #: tools/serializers/tool.py:105 msgid "field label" msgstr "" #: tools/serializers/tool.py:115 tools/serializers/tool.py:133 #: tools/serializers/tool.py:340 msgid "tool name" msgstr "" #: tools/serializers/tool.py:118 tools/serializers/tool.py:136 msgid "tool description" msgstr "" #: tools/serializers/tool.py:120 tools/serializers/tool.py:138 #: tools/serializers/tool.py:158 msgid "tool content" msgstr "" #: tools/serializers/tool.py:123 tools/serializers/tool.py:141 #: tools/serializers/tool.py:160 msgid "input field list" msgstr "" #: tools/serializers/tool.py:125 tools/serializers/tool.py:143 #: tools/serializers/tool.py:161 msgid "init field list" msgstr "" #: tools/serializers/tool.py:145 tools/serializers/tool.py:162 msgid "init params" msgstr "" #: tools/serializers/tool.py:154 msgid "variable value" msgstr "" #: tools/serializers/tool.py:218 msgid "field has no value set" msgstr "" #: tools/serializers/tool.py:234 tools/serializers/tool.py:239 msgid "type error" msgstr "" #: tools/serializers/tool.py:242 #, python-brace-format msgid "Field: {name} Type: {_type} Value: {value} Type conversion error" msgstr "" #: tools/serializers/tool.py:247 msgid "tool id" msgstr "" #: tools/serializers/tool.py:255 msgid "Tool not found" msgstr "" #: tools/serializers/tool.py:290 msgid "file" msgstr "" #: tools/serializers/tool.py:291 users/api/user.py:39 users/api/user.py:51 #: users/api/user.py:67 users/serializers/user.py:262 msgid "User ID" msgstr "" #: tools/serializers/tool.py:304 msgid "Unsupported file format" msgstr "" #: tools/serializers/tool.py:330 tools/serializers/tool.py:349 msgid "Folder not found" msgstr "" #: tools/serializers/tool.py:341 msgid "tool type" msgstr "" #: tools/views/tool.py:21 tools/views/tool.py:22 msgid "Create tool" msgstr "" #: tools/views/tool.py:26 tools/views/tool.py:40 tools/views/tool.py:57 #: tools/views/tool.py:75 tools/views/tool.py:89 tools/views/tool.py:103 #: tools/views/tool.py:120 tools/views/tool.py:144 tools/views/tool.py:161 msgid "Tool" msgstr "" #: tools/views/tool.py:36 tools/views/tool.py:37 msgid "Get tool by folder" msgstr "" #: tools/views/tool.py:53 tools/views/tool.py:54 msgid "Debug Tool" msgstr "" #: tools/views/tool.py:70 tools/views/tool.py:71 msgid "Update tool" msgstr "" #: tools/views/tool.py:85 tools/views/tool.py:86 msgid "Get tool" msgstr "" #: tools/views/tool.py:99 tools/views/tool.py:100 msgid "Delete tool" msgstr "" #: tools/views/tool.py:116 
tools/views/tool.py:117 msgid "Get tool list by pagination" msgstr "" #: tools/views/tool.py:139 tools/views/tool.py:140 msgid "Import tool" msgstr "" #: tools/views/tool.py:157 tools/views/tool.py:158 msgid "Export tool" msgstr "" #: users/api/user.py:90 msgid "Email or Username" msgstr "" #: users/api/user.py:106 msgid "Workspace ID" msgstr "" #: users/serializers/login.py:27 users/serializers/user.py:40 #: users/serializers/user.py:87 msgid "Username" msgstr "" #: users/serializers/login.py:28 users/serializers/user.py:41 #: users/serializers/user.py:99 users/serializers/user.py:228 msgid "Password" msgstr "" #: users/serializers/login.py:29 users/serializers/login.py:69 msgid "captcha" msgstr "" #: users/serializers/login.py:36 msgid "token" msgstr "" #: users/serializers/login.py:50 msgid "The captcha is incorrect or has expired" msgstr "" #: users/serializers/login.py:55 msgid "The user has been disabled, please contact the administrator!" msgstr "" #: users/serializers/user.py:31 msgid "Is Edit Password" msgstr "" #: users/serializers/user.py:32 msgid "permissions" msgstr "" #: users/serializers/user.py:42 users/serializers/user.py:79 #: users/serializers/user.py:191 msgid "Email" msgstr "" #: users/serializers/user.py:43 users/serializers/user.py:113 msgid "Nickname" msgstr "" #: users/serializers/user.py:44 users/serializers/user.py:120 #: users/serializers/user.py:206 msgid "Phone" msgstr "" #: users/serializers/user.py:93 msgid "Username must be 6-20 characters long" msgstr "" #: users/serializers/user.py:106 users/serializers/user.py:235 msgid "" "The password must be 6-20 characters long and must be a combination of " "letters, numbers, and special characters." msgstr "" #: users/serializers/user.py:142 msgid "Email or username" msgstr "" #: users/serializers/user.py:168 msgid "" "The community version supports up to 2 users. If you need more users, please " "contact us (https://fit2cloud.com/)." msgstr "" #: users/serializers/user.py:199 msgid "Name" msgstr "" #: users/serializers/user.py:213 msgid "Is Active" msgstr "" #: users/serializers/user.py:223 msgid "Email is already in use" msgstr "" #: users/serializers/user.py:242 msgid "Confirm Password" msgstr "" #: users/serializers/user.py:247 msgid "" "The confirmation password must be 6-20 characters long and must be a " "combination of letters, numbers, and special characters."
msgstr "" #: users/serializers/user.py:270 msgid "User does not exist" msgstr "" #: users/serializers/user.py:285 msgid "Unable to delete administrator" msgstr "" #: users/serializers/user.py:302 msgid "Cannot modify administrator status" msgstr "" #: users/views/login.py:21 users/views/login.py:22 users/views/login.py:23 msgid "Log in" msgstr "" #: users/views/login.py:24 users/views/login.py:36 users/views/user.py:31 #: users/views/user.py:44 users/views/user.py:58 users/views/user.py:73 #: users/views/user.py:87 users/views/user.py:98 users/views/user.py:109 #: users/views/user.py:125 users/views/user.py:140 msgid "User management" msgstr "" #: users/views/login.py:33 users/views/login.py:34 users/views/login.py:35 msgid "Get captcha" msgstr "" #: users/views/user.py:28 users/views/user.py:29 users/views/user.py:30 #: users/views/user.py:41 users/views/user.py:42 msgid "Get current user information" msgstr "" #: users/views/user.py:70 users/views/user.py:71 users/views/user.py:72 msgid "Create user" msgstr "" #: users/views/user.py:84 users/views/user.py:85 users/views/user.py:86 msgid "Delete user" msgstr "" #: users/views/user.py:95 users/views/user.py:96 users/views/user.py:97 msgid "Get user information" msgstr "" #: users/views/user.py:106 users/views/user.py:107 users/views/user.py:108 msgid "Update user information" msgstr "" #: users/views/user.py:122 users/views/user.py:123 users/views/user.py:124 msgid "Change password" msgstr "" #: users/views/user.py:137 users/views/user.py:138 users/views/user.py:139 msgid "Get user paginated list" msgstr ""