maxkb/apps/locales/zh_CN/LC_MESSAGES/django.po

# SOME DESCRIPTIVE TITLE.
# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
# This file is distributed under the same license as the PACKAGE package.
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
#
msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2025-04-29 14:48+0800\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
"Language: \n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
#: common/auth/authenticate.py:80
msgid "Not logged in, please log in first"
msgstr "未登录,请先登录"
#: common/auth/authenticate.py:82 common/auth/authenticate.py:89
#: common/auth/authenticate.py:95
msgid "Authentication information is incorrect! illegal user"
msgstr "身份验证信息不正确!非法用户"
#: common/auth/authentication.py:96
msgid "No permission to access"
msgstr "无权限访问"
#: common/auth/handle/impl/user_token.py:242
msgid "Login expired"
msgstr "登录已过期"
#: common/constants/exception_code_constants.py:31
#: users/serializers/login.py:53
msgid "The username or password is incorrect"
msgstr "用户名或密码不正确"
#: common/constants/exception_code_constants.py:32
msgid "Please log in first and bring the user Token"
msgstr "请先登录并携带用户 Token"
#: common/constants/exception_code_constants.py:33
#| msgid "Model saving failed"
msgid "Email sending failed"
msgstr "邮件发送失败"
#: common/constants/exception_code_constants.py:34
msgid "Email format error"
msgstr "邮箱格式错误"
#: common/constants/exception_code_constants.py:35
#| msgid "The user has been disabled, please contact the administrator!"
msgid "The email has been registered, please log in directly"
msgstr "该邮箱已注册,请直接登录"
#: common/constants/exception_code_constants.py:36
#| msgid "The model does not exist, please download the model first"
msgid "The email is not registered, please register first"
msgstr "该邮箱未注册,请先注册"
#: common/constants/exception_code_constants.py:38
msgid "The verification code is incorrect or the verification code has expired"
msgstr "验证码不正确或已过期"
#: common/constants/exception_code_constants.py:39
#| msgid "The user has been disabled, please contact the administrator!"
msgid "The username has been registered, please log in directly"
msgstr "用户名已注册,请直接登录"
#: common/constants/exception_code_constants.py:41
msgid ""
"The username cannot be empty and must be between 6 and 20 characters long."
msgstr "用户名不能为空且长度在6到20个字符之间。"
#: common/constants/exception_code_constants.py:43
msgid "Password and confirmation password are inconsistent"
msgstr "密码和确认密码不一致"
#: common/event/__init__.py:27
msgid "The download process was interrupted, please try again"
msgstr "下载过程被中断,请重试"
#: common/event/listener_manage.py:90
#, python-brace-format
msgid "Query vector data: {paragraph_id_list} error {error} {traceback}"
msgstr "查询向量数据:{paragraph_id_list} 错误:{error} {traceback}"
#: common/event/listener_manage.py:95
#, python-brace-format
msgid "Start--->Embedding paragraph: {paragraph_id_list}"
msgstr "开始--->向量段落: {paragraph_id_list}"
#: common/event/listener_manage.py:107
#, python-brace-format
msgid "Vectorized paragraph: {paragraph_id_list} error {error} {traceback}"
msgstr "向量段落: {paragraph_id_list} 错误:{error} {traceback}"
#: common/event/listener_manage.py:113
#, python-brace-format
msgid "End--->Embedding paragraph: {paragraph_id_list}"
msgstr "结束--->向量段落: {paragraph_id_list}"
#: common/event/listener_manage.py:122
#, python-brace-format
msgid "Start--->Embedding paragraph: {paragraph_id}"
msgstr "开始--->向量段落: {paragraph_id}"
#: common/event/listener_manage.py:147
#, python-brace-format
msgid "Vectorized paragraph: {paragraph_id} error {error} {traceback}"
msgstr "向量段落: {paragraph_id} 错误:{error} {traceback}"
#: common/event/listener_manage.py:152
#, python-brace-format
msgid "End--->Embedding paragraph: {paragraph_id}"
msgstr "结束--->向量段落: {paragraph_id}"
#: common/event/listener_manage.py:268
#, python-brace-format
msgid "Start--->Embedding document: {document_id}"
msgstr "开始--->向量文档: {document_id}"
#: common/event/listener_manage.py:288
#, python-brace-format
msgid "Vectorized document: {document_id} error {error} {traceback}"
msgstr "向量文档: {document_id} 错误:{error} {traceback}"
#: common/event/listener_manage.py:293
#, python-brace-format
msgid "End--->Embedding document: {document_id}"
msgstr "结束--->向量文档: {document_id}"
#: common/event/listener_manage.py:304
#, python-brace-format
msgid "Start--->Embedding knowledge: {knowledge_id}"
msgstr "开始--->向量知识库: {knowledge_id}"
#: common/event/listener_manage.py:308
#, python-brace-format
msgid "Start--->Embedding document: {document_list}"
msgstr "开始--->向量文档: {document_list}"
#: common/event/listener_manage.py:312 knowledge/task/embedding.py:116
#, python-brace-format
msgid "Vectorized knowledge: {knowledge_id} error {error} {traceback}"
msgstr "向量知识库: {knowledge_id} 错误:{error} {traceback}"
#: common/event/listener_manage.py:315
#, python-brace-format
msgid "End--->Embedding knowledge: {knowledge_id}"
msgstr "结束--->向量知识库: {knowledge_id}"
#: common/exception/handle_exception.py:32
msgid "Unknown exception"
msgstr "未知错误"
#: common/forms/base_field.py:64
#, python-brace-format
msgid "The field {field_label} is required"
msgstr "{field_label} 字段是必填项"
#: common/forms/slider_field.py:56
#, python-brace-format
msgid "The {field_label} cannot be less than {min}"
msgstr "{field_label} 不能小于{min}"
#: common/forms/slider_field.py:62
#, python-brace-format
msgid "The {field_label} cannot be greater than {max}"
msgstr "{field_label} 不能大于{max}"
#: common/result/api.py:17 common/result/api.py:27
msgid "response code"
msgstr "响应码"
#: common/result/api.py:18 common/result/api.py:19 common/result/api.py:28
#: common/result/api.py:29
msgid "error prompt"
msgstr "错误提示"
#: common/result/api.py:43
msgid "total number of data"
msgstr "总数据"
#: common/result/api.py:44
msgid "current page"
msgstr "当前页"
#: common/result/api.py:45
msgid "page size"
msgstr "每页大小"
#: common/result/result.py:31
msgid "Success"
msgstr "成功"
#: common/utils/common.py:85
msgid "Text-to-speech node, the text content must be of string type"
msgstr "文本转语音节点,文本内容必须是字符串类型"
#: common/utils/common.py:87
msgid "Text-to-speech node, the text content cannot be empty"
msgstr "文本转语音节点,文本内容不能为空"
#: common/utils/common.py:239
#, python-brace-format
msgid "Limit {count} exceeded, please contact us (https://fit2cloud.com/)."
msgstr "超过限制 {count},请联系我们 (https://fit2cloud.com/)."
#: folders/models/folder.py:6 folders/models/folder.py:13
#: folders/serializers/folder.py:86
#| msgid "model name"
msgid "folder name"
msgstr "文件夹名称"
#: folders/models/folder.py:9 folders/models/folder.py:15
#: folders/serializers/folder.py:89
msgid "parent id"
msgstr "父级 ID"
#: folders/serializers/folder.py:63
msgid "Folder depth cannot exceed 3 levels"
msgstr "文件夹深度不能超过3级"
#: folders/serializers/folder.py:85 folders/serializers/folder.py:121
#: knowledge/serializers/knowledge.py:27 knowledge/serializers/knowledge.py:34
#: tools/serializers/tool.py:339
#| msgid "user id"
msgid "folder id"
msgstr "文件夹 ID"
#: folders/serializers/folder.py:87
#| msgid "module user id"
msgid "folder user id"
msgstr "文件夹用户 ID"
#: folders/serializers/folder.py:88 folders/serializers/folder.py:122
#: folders/serializers/folder.py:166 knowledge/serializers/knowledge.py:44
#: models_provider/api/model.py:40 models_provider/api/model.py:53
#: models_provider/serializers/model_serializer.py:262
#: models_provider/serializers/model_serializer.py:326
#: tools/serializers/tool.py:169 tools/serializers/tool.py:190
#: tools/serializers/tool.py:248 tools/serializers/tool.py:292
#: tools/serializers/tool.py:322 tools/serializers/tool.py:338
msgid "workspace id"
msgstr "工作空间ID"
#: folders/serializers/folder.py:92 knowledge/serializers/knowledge.py:43
#: models_provider/serializers/model_serializer.py:108
#: models_provider/serializers/model_serializer.py:215
#: models_provider/serializers/model_serializer.py:255
#: tools/serializers/tool.py:168 tools/serializers/tool.py:189
msgid "user id"
msgstr "用户ID"
#: folders/serializers/folder.py:93 folders/serializers/folder.py:123
#: folders/serializers/folder.py:167 tools/serializers/tool.py:97
msgid "source"
msgstr "来源"
#: folders/serializers/folder.py:106
#| msgid "Module name already exists"
msgid "Folder name already exists"
msgstr "文件夹名称已存在"
#: folders/serializers/folder.py:132
#| msgid "Model does not exist"
msgid "Folder does not exist"
msgstr "文件夹不存在"
#: folders/serializers/folder.py:160
#| msgid "Cannot delete root module"
msgid "Cannot delete root folder"
msgstr "无法删除根文件夹"
#: folders/views/folder.py:19 folders/views/folder.py:20
#| msgid "Create model"
msgid "Create folder"
msgstr "创建文件夹"
#: folders/views/folder.py:24 folders/views/folder.py:41
#: folders/views/folder.py:60 folders/views/folder.py:75
#: folders/views/folder.py:90
msgid "Folder"
msgstr "文件夹"
#: folders/views/folder.py:37 folders/views/folder.py:38
#| msgid "Get module tree"
msgid "Get folder tree"
msgstr "获取文件夹树"
#: folders/views/folder.py:55 folders/views/folder.py:56
#| msgid "Update model"
msgid "Update folder"
msgstr "更新文件夹"
#: folders/views/folder.py:71 folders/views/folder.py:72
#| msgid "Get module"
msgid "Get folder"
msgstr "获取文件夹"
#: folders/views/folder.py:86 folders/views/folder.py:87
#| msgid "Delete model"
msgid "Delete folder"
msgstr "删除文件夹"
#: knowledge/serializers/common.py:98 knowledge/serializers/knowledge.py:37
#| msgid "source"
msgid "source url"
msgstr "来源"
#: knowledge/serializers/common.py:99
msgid "selector"
msgstr "选择器"
#: knowledge/serializers/common.py:106
#, python-brace-format
msgid "URL error, cannot parse [{source_url}]"
msgstr "URL 错误,无法解析 [{source_url}]"
#: knowledge/serializers/common.py:114
#| msgid "init field list"
msgid "id list"
msgstr "ID 列表"
#: knowledge/serializers/common.py:124
#| msgid "The following fields are required: {keys}"
msgid "The following id does not exist: {error_id_list}"
msgstr "以下ID不存在: {error_id_list}"
#: knowledge/serializers/common.py:181 knowledge/serializers/common.py:205
msgid "The knowledge base is inconsistent with the vector model"
msgstr "知识库与向量模型不一致"
#: knowledge/serializers/common.py:183 knowledge/serializers/common.py:207
msgid "Knowledge base setting error, please reset the knowledge base"
msgstr "知识库设置错误,请重置知识库"
#: knowledge/serializers/common.py:212
#| msgid "model id"
msgid "Model id"
msgstr "模型ID"
#: knowledge/serializers/common.py:213
msgid "Prompt word"
msgstr "提示词"
#: knowledge/serializers/common.py:215
msgid "state list"
msgstr "状态列表"
#: knowledge/serializers/document.py:26
#| msgid "module name"
msgid "document name"
msgstr "文档名称"
#: knowledge/serializers/document.py:31 knowledge/serializers/knowledge.py:26
#: knowledge/serializers/knowledge.py:33
#| msgid "model name"
msgid "knowledge name"
msgstr "知识库名称"
#: knowledge/serializers/document.py:32 knowledge/serializers/knowledge.py:28
#: knowledge/serializers/knowledge.py:35
#| msgid "tool description"
msgid "knowledge description"
msgstr "知识库描述"
#: knowledge/serializers/document.py:33
#| msgid "Embedding Model"
msgid "embedding model"
msgstr "向量模型"
#: knowledge/serializers/document.py:39 knowledge/serializers/document.py:90
#: knowledge/serializers/paragraph.py:58 knowledge/serializers/paragraph.py:150
#| msgid "parent id"
msgid "document id"
msgstr "文档 ID"
#: knowledge/serializers/document.py:40 knowledge/serializers/paragraph.py:149
#| msgid "model name"
msgid "knowledge id"
msgstr "知识库 ID"
#: knowledge/serializers/document.py:46
#| msgid "Module does not exist"
msgid "document id not exist"
msgstr "文档 ID 不存在"
#: knowledge/serializers/document.py:71
#: models_provider/serializers/model_serializer.py:116
#: models_provider/serializers/model_serializer.py:132
#: models_provider/serializers/model_serializer.py:151
#: models_provider/serializers/model_serializer.py:178
#: models_provider/serializers/model_serializer.py:373
#: models_provider/tools.py:111
msgid "Model does not exist"
msgstr "模型不存在"
#: knowledge/serializers/document.py:73
#| msgid "No permission to access"
msgid "No permission to use this model"
msgstr "无权限使用此模型"
#: knowledge/serializers/document.py:87
#| msgid "The user has been disabled, please contact the administrator!"
msgid "The task is being executed, please do not send it repeatedly."
msgstr "任务正在执行,请勿重复发送。"
#: knowledge/serializers/document.py:95
#| msgid "Model does not exist"
msgid "knowledge id not exist"
msgstr "知识库 ID 不存在"
#: knowledge/serializers/knowledge.py:29 knowledge/serializers/knowledge.py:36
msgid "knowledge embedding"
msgstr "知识库向量"
#: knowledge/serializers/knowledge.py:38
msgid "knowledge selector"
msgstr "知识库选择器"
#: knowledge/serializers/knowledge.py:55
msgid ""
"The community version supports up to 50 knowledge bases. If you need more "
"knowledge bases, please contact us (https://fit2cloud.com/)."
msgstr "社区版支持最多50个知识库如需更多知识库请联系我们 (https://fit2cloud.com/)."
#: knowledge/serializers/knowledge.py:64 knowledge/serializers/knowledge.py:123
msgid "Knowledge base name duplicate!"
msgstr "知识库名称重复!"
#: knowledge/serializers/paragraph.py:31 knowledge/serializers/problem.py:15
#| msgid "tool content"
msgid "content"
msgstr "内容"
#: knowledge/serializers/paragraph.py:33 knowledge/serializers/paragraph.py:40
#: knowledge/serializers/paragraph.py:43 knowledge/serializers/paragraph.py:48
#: knowledge/serializers/paragraph.py:50
#| msgid "science fiction style"
msgid "section title"
msgstr "章节标题"
#: knowledge/serializers/paragraph.py:36 tools/serializers/tool.py:127
#: tools/serializers/tool.py:147
msgid "Is active"
msgstr "是否启用"
#: knowledge/serializers/paragraph.py:54
msgid "paragraph id"
msgstr "段落 ID"
#: knowledge/serializers/paragraph.py:56
#| msgid "parent id"
msgid "dataset id"
msgstr "知识库 ID"
#: knowledge/serializers/paragraph.py:63
#| msgid "Model does not exist"
msgid "Paragraph id does not exist"
msgstr "段落 ID 不存在"
#: knowledge/serializers/paragraph.py:99
#| msgid "Model does not exist"
msgid "Problem id does not exist"
msgstr "问题 ID 不存在"
#: knowledge/serializers/paragraph.py:156
#| msgid "The username or password is incorrect"
msgid "The document id is incorrect"
msgstr "文档 ID 不正确"
#: knowledge/serializers/problem.py:14
msgid "problem id"
msgstr "问题 ID"
#: knowledge/task/embedding.py:24 knowledge/task/embedding.py:74
#, python-brace-format
msgid "Failed to obtain vector model: {error} {traceback}"
msgstr "向量模型获取失败: {error} {traceback}"
#: knowledge/task/embedding.py:103
#, python-brace-format
msgid "Start--->Vectorized knowledge: {knowledge_id}"
msgstr "开始--->向量知识库: {knowledge_id}"
#: knowledge/task/embedding.py:107
#, python-brace-format
msgid "Knowledge documentation: {document_names}"
msgstr "知识库文档: {document_names}"
#: knowledge/task/embedding.py:120
#, python-brace-format
msgid "End--->Vectorized knowledge: {knowledge_id}"
msgstr "结束--->向量知识库: {knowledge_id}"
#: knowledge/task/handler.py:107
#, python-brace-format
msgid "Association problem failed {error}"
msgstr "关联问题失败 {error}"
#: knowledge/task/sync.py:29 knowledge/task/sync.py:44
#, python-brace-format
msgid "Start--->Start synchronization web knowledge base:{knowledge_id}"
msgstr "开始--->开始同步 web 知识库:{knowledge_id}"
#: knowledge/task/sync.py:34 knowledge/task/sync.py:48
#, python-brace-format
msgid "End--->End synchronization web knowledge base:{knowledge_id}"
msgstr "结束--->结束同步 web 知识库:{knowledge_id}"
#: knowledge/task/sync.py:36 knowledge/task/sync.py:50
#, python-brace-format
msgid "Synchronize web knowledge base:{knowledge_id} error{error}{traceback}"
msgstr "同步 web 知识库:{knowledge_id} 错误{error}{traceback}"
#: knowledge/views/knowledge.py:19 knowledge/views/knowledge.py:20
#| msgid "Get module"
msgid "Get knowledge by folder"
msgstr "根据文件夹获取知识库"
#: knowledge/views/knowledge.py:23 knowledge/views/knowledge.py:42
#: knowledge/views/knowledge.py:61
msgid "Knowledge Base"
msgstr "知识库"
#: knowledge/views/knowledge.py:37 knowledge/views/knowledge.py:38
#| msgid "Create model"
msgid "Create base knowledge"
msgstr "创建知识库"
#: knowledge/views/knowledge.py:56 knowledge/views/knowledge.py:57
#| msgid "Create model"
msgid "Create web knowledge"
msgstr "创建 web 知识库"
#: maxkb/settings/base.py:85
msgid "Intelligent customer service platform"
msgstr "智能客服平台"
#: models_provider/api/model.py:59
#: models_provider/serializers/model_serializer.py:107
#: models_provider/serializers/model_serializer.py:367
msgid "model id"
msgstr "模型ID"
#: models_provider/api/provide.py:17 models_provider/api/provide.py:23
#: models_provider/api/provide.py:28 models_provider/api/provide.py:30
#: models_provider/api/provide.py:82
#: models_provider/serializers/model_serializer.py:40
#: models_provider/serializers/model_serializer.py:218
#: models_provider/serializers/model_serializer.py:256
#: models_provider/serializers/model_serializer.py:321
msgid "model name"
msgstr "模型名称"
#: models_provider/api/provide.py:18 models_provider/api/provide.py:38
#: models_provider/api/provide.py:76 models_provider/api/provide.py:104
#: models_provider/api/provide.py:126
#: models_provider/serializers/model_serializer.py:41
#: models_provider/serializers/model_serializer.py:257
#: models_provider/serializers/model_serializer.py:324
msgid "provider"
msgstr "供应商"
#: models_provider/api/provide.py:19
msgid "icon"
msgstr "图标"
#: models_provider/api/provide.py:24
msgid "value"
msgstr "值"
#: models_provider/api/provide.py:29 models_provider/api/provide.py:70
#: models_provider/api/provide.py:98
#: models_provider/serializers/model_serializer.py:42
#: models_provider/serializers/model_serializer.py:220
#: models_provider/serializers/model_serializer.py:258
#: models_provider/serializers/model_serializer.py:322
msgid "model type"
msgstr "模型类型"
#: models_provider/api/provide.py:34 tools/serializers/tool.py:107
msgid "input type"
msgstr "输入类型"
#: models_provider/api/provide.py:35
msgid "label"
msgstr "标签"
#: models_provider/api/provide.py:36
msgid "text field"
msgstr "文本字段"
#: models_provider/api/provide.py:37
msgid "value field"
msgstr "值"
#: models_provider/api/provide.py:39
msgid "method"
msgstr "方法"
#: models_provider/api/provide.py:40 tools/serializers/tool.py:92
#: tools/serializers/tool.py:106
msgid "required"
msgstr "必填"
#: models_provider/api/provide.py:41
msgid "default value"
msgstr "默认值"
#: models_provider/api/provide.py:42
msgid "relation show field dict"
msgstr "关系显示字段"
#: models_provider/api/provide.py:43
msgid "relation trigger field dict"
msgstr "关系触发字段"
#: models_provider/api/provide.py:44
msgid "trigger type"
msgstr "触发类型"
#: models_provider/api/provide.py:45
msgid "attrs"
msgstr "属性"
#: models_provider/api/provide.py:46
msgid "props info"
msgstr "props 信息"
#: models_provider/base_model_provider.py:60
msgid "Model type cannot be empty"
msgstr "模型类型不能为空"
#: models_provider/base_model_provider.py:85
msgid "The current platform does not support downloading models"
msgstr "当前平台不支持下载模型"
#: models_provider/base_model_provider.py:143
msgid "LLM"
msgstr "大语言模型"
#: models_provider/base_model_provider.py:144
msgid "Embedding Model"
msgstr "向量模型"
#: models_provider/base_model_provider.py:145
msgid "Speech2Text"
msgstr "语音识别"
#: models_provider/base_model_provider.py:146
msgid "TTS"
msgstr "语音合成"
#: models_provider/base_model_provider.py:147
msgid "Vision Model"
msgstr "视觉模型"
#: models_provider/base_model_provider.py:148
msgid "Image Generation"
msgstr "图片生成"
#: models_provider/base_model_provider.py:149
msgid "Rerank"
msgstr "重排模型"
#: models_provider/base_model_provider.py:223
msgid "The model does not support"
msgstr "模型不支持"
#: models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:42
msgid ""
"With the GTE-Rerank text sorting series model developed by Alibaba Tongyi "
"Lab, developers can integrate high-quality text retrieval and sorting "
"through the LlamaIndex framework."
msgstr ""
"阿里巴巴通义实验室开发的GTE-Rerank文本排序系列模型开发者可以通过LlamaIndex"
"框架进行集成高质量文本检索、排序。"
#: models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:45
msgid ""
"Chinese (including various dialects such as Cantonese), English, Japanese, "
"and Korean support free switching between multiple languages."
msgstr "中文(含粤语等各种方言)、英文、日语、韩语支持多个语种自由切换"
#: models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:48
msgid ""
"CosyVoice is based on a new generation of large generative speech models, "
"which can predict emotions, intonation, rhythm, etc. based on context, and "
"has better anthropomorphic effects."
msgstr ""
"CosyVoice基于新一代生成式语音大模型能根据上下文预测情绪、语调、韵律等具有"
"更好的拟人效果"
#: models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:51
msgid ""
"Universal text vector is Tongyi Lab's multi-language text unified vector "
"model based on the LLM base. It provides high-level vector services for "
"multiple mainstream languages around the world and helps developers quickly "
"convert text data into high-quality vector data."
msgstr ""
"通用文本向量是通义实验室基于LLM底座的多语言文本统一向量模型面向全球多个主"
"流语种,提供高水准的向量服务,帮助开发者将文本数据快速转换为高质量的向量数"
"据。"
#: models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:69
msgid ""
"Tongyi Wanxiang - a large image model for text generation, supports "
"bilingual input in Chinese and English, and supports the input of reference "
"pictures for reference content or reference style migration. Key styles "
"include but are not limited to watercolor, oil painting, Chinese painting, "
"sketch, flat illustration, two-dimensional, and 3D. Cartoon."
msgstr ""
"通义万相-文本生成图像大模型,支持中英文双语输入,支持输入参考图片进行参考内容"
"或者参考风格迁移,重点风格包括但不限于水彩、油画、中国画、素描、扁平插画、二"
"次元、3D卡通。"
#: models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:95
msgid "Alibaba Cloud Bailian"
msgstr "阿里云百炼"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/embedding.py:53
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:50
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:74
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:61
#: models_provider/impl/aliyun_bai_lian_model_provider/model/tti.py:43
#: models_provider/impl/aliyun_bai_lian_model_provider/model/tts.py:37
#: models_provider/impl/anthropic_model_provider/credential/image.py:33
#: models_provider/impl/anthropic_model_provider/credential/llm.py:57
#: models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:34
#: models_provider/impl/aws_bedrock_model_provider/credential/llm.py:53
#: models_provider/impl/azure_model_provider/credential/embedding.py:37
#: models_provider/impl/azure_model_provider/credential/image.py:40
#: models_provider/impl/azure_model_provider/credential/llm.py:69
#: models_provider/impl/deepseek_model_provider/credential/llm.py:57
#: models_provider/impl/gemini_model_provider/credential/embedding.py:36
#: models_provider/impl/gemini_model_provider/credential/image.py:32
#: models_provider/impl/gemini_model_provider/credential/llm.py:57
#: models_provider/impl/gemini_model_provider/model/stt.py:43
#: models_provider/impl/kimi_model_provider/credential/llm.py:57
#: models_provider/impl/local_model_provider/credential/embedding.py:36
#: models_provider/impl/local_model_provider/credential/reranker.py:37
#: models_provider/impl/ollama_model_provider/credential/embedding.py:37
#: models_provider/impl/ollama_model_provider/credential/reranker.py:44
#: models_provider/impl/openai_model_provider/credential/embedding.py:36
#: models_provider/impl/openai_model_provider/credential/image.py:35
#: models_provider/impl/openai_model_provider/credential/llm.py:59
#: models_provider/impl/siliconCloud_model_provider/credential/embedding.py:36
#: models_provider/impl/siliconCloud_model_provider/credential/image.py:35
#: models_provider/impl/siliconCloud_model_provider/credential/llm.py:58
#: models_provider/impl/siliconCloud_model_provider/credential/reranker.py:37
#: models_provider/impl/tencent_cloud_model_provider/credential/llm.py:58
#: models_provider/impl/tencent_model_provider/credential/embedding.py:23
#: models_provider/impl/tencent_model_provider/credential/image.py:37
#: models_provider/impl/tencent_model_provider/credential/llm.py:51
#: models_provider/impl/tencent_model_provider/model/tti.py:54
#: models_provider/impl/vllm_model_provider/credential/embedding.py:36
#: models_provider/impl/vllm_model_provider/credential/llm.py:50
#: models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:36
#: models_provider/impl/volcanic_engine_model_provider/credential/image.py:32
#: models_provider/impl/volcanic_engine_model_provider/credential/llm.py:57
#: models_provider/impl/volcanic_engine_model_provider/model/tts.py:77
#: models_provider/impl/wenxin_model_provider/credential/embedding.py:31
#: models_provider/impl/wenxin_model_provider/credential/llm.py:60
#: models_provider/impl/xf_model_provider/credential/embedding.py:31
#: models_provider/impl/xf_model_provider/credential/llm.py:76
#: models_provider/impl/xf_model_provider/model/tts.py:101
#: models_provider/impl/xinference_model_provider/credential/embedding.py:31
#: models_provider/impl/xinference_model_provider/credential/image.py:32
#: models_provider/impl/xinference_model_provider/credential/llm.py:50
#: models_provider/impl/xinference_model_provider/credential/reranker.py:34
#: models_provider/impl/xinference_model_provider/model/tts.py:44
#: models_provider/impl/zhipu_model_provider/credential/image.py:31
#: models_provider/impl/zhipu_model_provider/credential/llm.py:56
#: models_provider/impl/zhipu_model_provider/model/tti.py:49
msgid "Hello"
msgstr "你好"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:36
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:60
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:46
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py:44
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:96
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:89
#: models_provider/impl/anthropic_model_provider/credential/image.py:23
#: models_provider/impl/anthropic_model_provider/credential/llm.py:47
#: models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:21
#: models_provider/impl/aws_bedrock_model_provider/credential/llm.py:40
#: models_provider/impl/azure_model_provider/credential/embedding.py:27
#: models_provider/impl/azure_model_provider/credential/image.py:30
#: models_provider/impl/azure_model_provider/credential/llm.py:59
#: models_provider/impl/azure_model_provider/credential/stt.py:23
#: models_provider/impl/azure_model_provider/credential/tti.py:58
#: models_provider/impl/azure_model_provider/credential/tts.py:41
#: models_provider/impl/deepseek_model_provider/credential/llm.py:47
#: models_provider/impl/gemini_model_provider/credential/embedding.py:26
#: models_provider/impl/gemini_model_provider/credential/image.py:22
#: models_provider/impl/gemini_model_provider/credential/llm.py:47
#: models_provider/impl/gemini_model_provider/credential/stt.py:21
#: models_provider/impl/kimi_model_provider/credential/llm.py:47
#: models_provider/impl/local_model_provider/credential/embedding.py:27
#: models_provider/impl/local_model_provider/credential/reranker.py:28
#: models_provider/impl/ollama_model_provider/credential/embedding.py:26
#: models_provider/impl/ollama_model_provider/credential/image.py:19
#: models_provider/impl/ollama_model_provider/credential/llm.py:44
#: models_provider/impl/ollama_model_provider/credential/reranker.py:27
#: models_provider/impl/ollama_model_provider/credential/reranker.py:31
#: models_provider/impl/openai_model_provider/credential/embedding.py:26
#: models_provider/impl/openai_model_provider/credential/image.py:25
#: models_provider/impl/openai_model_provider/credential/llm.py:48
#: models_provider/impl/openai_model_provider/credential/stt.py:22
#: models_provider/impl/openai_model_provider/credential/tti.py:61
#: models_provider/impl/openai_model_provider/credential/tts.py:40
#: models_provider/impl/siliconCloud_model_provider/credential/embedding.py:26
#: models_provider/impl/siliconCloud_model_provider/credential/image.py:25
#: models_provider/impl/siliconCloud_model_provider/credential/llm.py:47
#: models_provider/impl/siliconCloud_model_provider/credential/reranker.py:28
#: models_provider/impl/siliconCloud_model_provider/credential/stt.py:22
#: models_provider/impl/siliconCloud_model_provider/credential/tti.py:61
#: models_provider/impl/siliconCloud_model_provider/credential/tts.py:22
#: models_provider/impl/tencent_cloud_model_provider/credential/llm.py:47
#: models_provider/impl/tencent_model_provider/credential/embedding.py:19
#: models_provider/impl/tencent_model_provider/credential/image.py:28
#: models_provider/impl/tencent_model_provider/credential/llm.py:31
#: models_provider/impl/tencent_model_provider/credential/tti.py:78
#: models_provider/impl/vllm_model_provider/credential/embedding.py:26
#: models_provider/impl/vllm_model_provider/credential/image.py:22
#: models_provider/impl/vllm_model_provider/credential/llm.py:39
#: models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:26
#: models_provider/impl/volcanic_engine_model_provider/credential/image.py:22
#: models_provider/impl/volcanic_engine_model_provider/credential/llm.py:47
#: models_provider/impl/volcanic_engine_model_provider/credential/stt.py:25
#: models_provider/impl/volcanic_engine_model_provider/credential/tti.py:41
#: models_provider/impl/volcanic_engine_model_provider/credential/tts.py:51
#: models_provider/impl/wenxin_model_provider/credential/embedding.py:27
#: models_provider/impl/wenxin_model_provider/credential/llm.py:46
#: models_provider/impl/xf_model_provider/credential/embedding.py:27
#: models_provider/impl/xf_model_provider/credential/image.py:29
#: models_provider/impl/xf_model_provider/credential/llm.py:66
#: models_provider/impl/xf_model_provider/credential/stt.py:24
#: models_provider/impl/xf_model_provider/credential/tts.py:47
#: models_provider/impl/xinference_model_provider/credential/embedding.py:19
#: models_provider/impl/xinference_model_provider/credential/image.py:22
#: models_provider/impl/xinference_model_provider/credential/llm.py:39
#: models_provider/impl/xinference_model_provider/credential/reranker.py:25
#: models_provider/impl/xinference_model_provider/credential/stt.py:21
#: models_provider/impl/xinference_model_provider/credential/tti.py:59
#: models_provider/impl/xinference_model_provider/credential/tts.py:39
#: models_provider/impl/zhipu_model_provider/credential/image.py:21
#: models_provider/impl/zhipu_model_provider/credential/llm.py:47
#: models_provider/impl/zhipu_model_provider/credential/tti.py:40
#, python-brace-format
msgid "{model_type} Model type is not supported"
msgstr "{model_type} 模型类型不支持"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:44
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:68
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:55
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py:53
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:105
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:98
#, python-brace-format
msgid "{key} is required"
msgstr "{key} 是必填项"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:60
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:82
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:69
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py:67
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:121
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:113
#: models_provider/impl/anthropic_model_provider/credential/image.py:43
#: models_provider/impl/anthropic_model_provider/credential/llm.py:65
#: models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:42
#: models_provider/impl/aws_bedrock_model_provider/credential/llm.py:61
#: models_provider/impl/azure_model_provider/credential/image.py:50
#: models_provider/impl/azure_model_provider/credential/stt.py:40
#: models_provider/impl/azure_model_provider/credential/tti.py:77
#: models_provider/impl/azure_model_provider/credential/tts.py:58
#: models_provider/impl/deepseek_model_provider/credential/llm.py:65
#: models_provider/impl/gemini_model_provider/credential/embedding.py:43
#: models_provider/impl/gemini_model_provider/credential/image.py:42
#: models_provider/impl/gemini_model_provider/credential/llm.py:66
#: models_provider/impl/gemini_model_provider/credential/stt.py:38
#: models_provider/impl/kimi_model_provider/credential/llm.py:64
#: models_provider/impl/local_model_provider/credential/embedding.py:44
#: models_provider/impl/local_model_provider/credential/reranker.py:45
#: models_provider/impl/ollama_model_provider/credential/reranker.py:51
#: models_provider/impl/openai_model_provider/credential/embedding.py:43
#: models_provider/impl/openai_model_provider/credential/image.py:45
#: models_provider/impl/openai_model_provider/credential/llm.py:67
#: models_provider/impl/openai_model_provider/credential/stt.py:39
#: models_provider/impl/openai_model_provider/credential/tti.py:80
#: models_provider/impl/openai_model_provider/credential/tts.py:58
#: models_provider/impl/siliconCloud_model_provider/credential/embedding.py:43
#: models_provider/impl/siliconCloud_model_provider/credential/image.py:45
#: models_provider/impl/siliconCloud_model_provider/credential/llm.py:66
#: models_provider/impl/siliconCloud_model_provider/credential/reranker.py:44
#: models_provider/impl/siliconCloud_model_provider/credential/stt.py:39
#: models_provider/impl/siliconCloud_model_provider/credential/tti.py:80
#: models_provider/impl/siliconCloud_model_provider/credential/tts.py:40
#: models_provider/impl/tencent_cloud_model_provider/credential/llm.py:66
#: models_provider/impl/tencent_model_provider/credential/embedding.py:30
#: models_provider/impl/tencent_model_provider/credential/image.py:47
#: models_provider/impl/tencent_model_provider/credential/llm.py:57
#: models_provider/impl/tencent_model_provider/credential/tti.py:104
#: models_provider/impl/vllm_model_provider/credential/embedding.py:43
#: models_provider/impl/vllm_model_provider/credential/image.py:42
#: models_provider/impl/vllm_model_provider/credential/llm.py:55
#: models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:43
#: models_provider/impl/volcanic_engine_model_provider/credential/image.py:42
#: models_provider/impl/volcanic_engine_model_provider/credential/llm.py:66
#: models_provider/impl/volcanic_engine_model_provider/credential/stt.py:42
#: models_provider/impl/volcanic_engine_model_provider/credential/tti.py:58
#: models_provider/impl/volcanic_engine_model_provider/credential/tts.py:68
#: models_provider/impl/wenxin_model_provider/credential/embedding.py:38
#: models_provider/impl/xf_model_provider/credential/embedding.py:38
#: models_provider/impl/xf_model_provider/credential/image.py:50
#: models_provider/impl/xf_model_provider/credential/llm.py:84
#: models_provider/impl/xf_model_provider/credential/stt.py:41
#: models_provider/impl/xf_model_provider/credential/tts.py:65
#: models_provider/impl/xinference_model_provider/credential/image.py:41
#: models_provider/impl/xinference_model_provider/credential/reranker.py:40
#: models_provider/impl/xinference_model_provider/credential/stt.py:37
#: models_provider/impl/xinference_model_provider/credential/tti.py:77
#: models_provider/impl/xinference_model_provider/credential/tts.py:56
#: models_provider/impl/zhipu_model_provider/credential/image.py:41
#: models_provider/impl/zhipu_model_provider/credential/llm.py:64
#: models_provider/impl/zhipu_model_provider/credential/tti.py:59
#, python-brace-format
msgid ""
"Verification failed, please check whether the parameters are correct: {error}"
msgstr "认证失败,请检查参数是否正确:{error}"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:17
#: models_provider/impl/anthropic_model_provider/credential/llm.py:22
#: models_provider/impl/aws_bedrock_model_provider/credential/llm.py:14
#: models_provider/impl/azure_model_provider/credential/llm.py:23
#: models_provider/impl/deepseek_model_provider/credential/llm.py:22
#: models_provider/impl/gemini_model_provider/credential/llm.py:22
#: models_provider/impl/kimi_model_provider/credential/llm.py:22
#: models_provider/impl/ollama_model_provider/credential/llm.py:20
#: models_provider/impl/openai_model_provider/credential/llm.py:23
#: models_provider/impl/siliconCloud_model_provider/credential/llm.py:22
#: models_provider/impl/tencent_cloud_model_provider/credential/llm.py:22
#: models_provider/impl/tencent_model_provider/credential/llm.py:14
#: models_provider/impl/vllm_model_provider/credential/llm.py:15
#: models_provider/impl/volcanic_engine_model_provider/credential/llm.py:22
#: models_provider/impl/wenxin_model_provider/credential/llm.py:22
#: models_provider/impl/xf_model_provider/credential/llm.py:22
#: models_provider/impl/xf_model_provider/credential/llm.py:41
#: models_provider/impl/xinference_model_provider/credential/llm.py:15
#: models_provider/impl/zhipu_model_provider/credential/llm.py:22
msgid "Temperature"
msgstr "温度"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:18
#: models_provider/impl/anthropic_model_provider/credential/llm.py:23
#: models_provider/impl/aws_bedrock_model_provider/credential/llm.py:15
#: models_provider/impl/azure_model_provider/credential/llm.py:24
#: models_provider/impl/deepseek_model_provider/credential/llm.py:23
#: models_provider/impl/gemini_model_provider/credential/llm.py:23
#: models_provider/impl/kimi_model_provider/credential/llm.py:23
#: models_provider/impl/ollama_model_provider/credential/llm.py:21
#: models_provider/impl/openai_model_provider/credential/llm.py:24
#: models_provider/impl/siliconCloud_model_provider/credential/llm.py:23
#: models_provider/impl/tencent_cloud_model_provider/credential/llm.py:23
#: models_provider/impl/tencent_model_provider/credential/llm.py:15
#: models_provider/impl/vllm_model_provider/credential/llm.py:16
#: models_provider/impl/volcanic_engine_model_provider/credential/llm.py:23
#: models_provider/impl/wenxin_model_provider/credential/llm.py:23
#: models_provider/impl/xf_model_provider/credential/llm.py:23
#: models_provider/impl/xf_model_provider/credential/llm.py:42
#: models_provider/impl/xinference_model_provider/credential/llm.py:16
#: models_provider/impl/zhipu_model_provider/credential/llm.py:23
msgid ""
"Higher values make the output more random, while lower values make it more "
"focused and deterministic"
msgstr "较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:30
#: models_provider/impl/anthropic_model_provider/credential/llm.py:31
#: models_provider/impl/aws_bedrock_model_provider/credential/llm.py:23
#: models_provider/impl/azure_model_provider/credential/llm.py:32
#: models_provider/impl/azure_model_provider/credential/llm.py:43
#: models_provider/impl/deepseek_model_provider/credential/llm.py:31
#: models_provider/impl/gemini_model_provider/credential/llm.py:31
#: models_provider/impl/kimi_model_provider/credential/llm.py:31
#: models_provider/impl/ollama_model_provider/credential/llm.py:29
#: models_provider/impl/openai_model_provider/credential/llm.py:32
#: models_provider/impl/siliconCloud_model_provider/credential/llm.py:31
#: models_provider/impl/tencent_cloud_model_provider/credential/llm.py:31
#: models_provider/impl/vllm_model_provider/credential/llm.py:24
#: models_provider/impl/volcanic_engine_model_provider/credential/llm.py:31
#: models_provider/impl/wenxin_model_provider/credential/llm.py:31
#: models_provider/impl/xf_model_provider/credential/llm.py:31
#: models_provider/impl/xf_model_provider/credential/llm.py:50
#: models_provider/impl/xinference_model_provider/credential/llm.py:24
#: models_provider/impl/zhipu_model_provider/credential/llm.py:31
msgid "Output the maximum Tokens"
msgstr "输出最大Token数"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:31
msgid "Specify the maximum number of tokens that the model can generate."
msgstr "指定模型可以生成的最大 tokens 数"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:44
#: models_provider/impl/anthropic_model_provider/credential/image.py:15
#: models_provider/impl/anthropic_model_provider/credential/llm.py:74
msgid "API URL"
msgstr ""
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:45
#: models_provider/impl/anthropic_model_provider/credential/image.py:16
#: models_provider/impl/anthropic_model_provider/credential/llm.py:75
msgid "API Key"
msgstr ""
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:20
#: models_provider/impl/azure_model_provider/credential/tti.py:15
#: models_provider/impl/openai_model_provider/credential/tti.py:15
#: models_provider/impl/siliconCloud_model_provider/credential/tti.py:15
#: models_provider/impl/volcanic_engine_model_provider/credential/tti.py:15
#: models_provider/impl/xinference_model_provider/credential/tti.py:14
#: models_provider/impl/zhipu_model_provider/credential/tti.py:15
#| msgid "page size"
msgid "Image size"
msgstr "每页大小"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:20
#: models_provider/impl/azure_model_provider/credential/tti.py:15
msgid "Specify the size of the generated image, such as: 1024x1024"
msgstr "指定生成图片的尺寸, 如: 1024x1024"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:34
#: models_provider/impl/azure_model_provider/credential/tti.py:40
#: models_provider/impl/openai_model_provider/credential/tti.py:43
#: models_provider/impl/siliconCloud_model_provider/credential/tti.py:43
#: models_provider/impl/xinference_model_provider/credential/tti.py:41
msgid "Number of pictures"
msgstr "图片数量"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:34
#: models_provider/impl/azure_model_provider/credential/tti.py:40
msgid "Specify the number of generated images"
msgstr "指定生成图片的数量"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:44
msgid "Style"
msgstr "风格"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:44
msgid "Specify the style of generated images"
msgstr "指定生成图片的风格"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:48
msgid "Default value, the image style is randomly output by the model"
msgstr "默认值,图片风格由模型随机输出"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:49
msgid "photography"
msgstr "摄影"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:50
msgid "Portraits"
msgstr "人像写真"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:51
msgid "3D cartoon"
msgstr "3D卡通"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:52
msgid "animation"
msgstr "动画"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:53
msgid "painting"
msgstr "油画"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:54
msgid "watercolor"
msgstr "水彩"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:55
msgid "sketch"
msgstr "素描"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:56
msgid "Chinese painting"
msgstr "中国画"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:57
msgid "flat illustration"
msgstr "扁平插画"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:20
#| msgid "timbre"
msgid "Timbre"
msgstr "音色"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:20
#: models_provider/impl/volcanic_engine_model_provider/credential/tts.py:15
msgid "Chinese sounds can support mixed scenes of Chinese and English"
msgstr "中文音色支持中英文混合场景"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:26
msgid "Long Xiaochun"
msgstr "龙小淳"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:27
msgid "Long Xiaoxia"
msgstr "龙小夏"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:28
msgid "Long Xiaochen"
msgstr "龙小诚"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:29
msgid "Long Xiaobai"
msgstr "龙小白"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:30
#| msgid "Long laotie"
msgid "Long Laotie"
msgstr "龙老铁"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:31
msgid "Long Shu"
msgstr "龙书"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:32
msgid "Long Shuo"
msgstr "龙硕"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:33
msgid "Long Jing"
msgstr "龙婧"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:34
msgid "Long Miao"
msgstr "龙妙"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:35
msgid "Long Yue"
msgstr "龙悦"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:36
msgid "Long Yuan"
msgstr "龙媛"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:37
msgid "Long Fei"
msgstr "龙飞"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:38
msgid "Long Jielidou"
msgstr "龙杰力豆"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:39
msgid "Long Tong"
msgstr "龙彤"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:40
msgid "Long Xiang"
msgstr "龙祥"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:47
msgid "Speaking speed"
msgstr "语速"
#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:47
msgid "[0.5, 2], the default is 1, usually one decimal place is enough"
msgstr "[0.5,2]默认为1通常一位小数就足够了"
#: models_provider/impl/anthropic_model_provider/credential/image.py:28
#: models_provider/impl/anthropic_model_provider/credential/llm.py:52
#: models_provider/impl/azure_model_provider/credential/embedding.py:32
#: models_provider/impl/azure_model_provider/credential/image.py:35
#: models_provider/impl/azure_model_provider/credential/llm.py:64
#: models_provider/impl/azure_model_provider/credential/stt.py:28
#: models_provider/impl/azure_model_provider/credential/tti.py:63
#: models_provider/impl/azure_model_provider/credential/tts.py:46
#: models_provider/impl/deepseek_model_provider/credential/llm.py:52
#: models_provider/impl/gemini_model_provider/credential/embedding.py:31
#: models_provider/impl/gemini_model_provider/credential/image.py:27
#: models_provider/impl/gemini_model_provider/credential/llm.py:52
#: models_provider/impl/gemini_model_provider/credential/stt.py:26
#: models_provider/impl/kimi_model_provider/credential/llm.py:52
#: models_provider/impl/local_model_provider/credential/embedding.py:31
#: models_provider/impl/local_model_provider/credential/reranker.py:32
#: models_provider/impl/ollama_model_provider/credential/embedding.py:46
#: models_provider/impl/ollama_model_provider/credential/llm.py:62
#: models_provider/impl/ollama_model_provider/credential/reranker.py:63
#: models_provider/impl/openai_model_provider/credential/embedding.py:31
#: models_provider/impl/openai_model_provider/credential/image.py:30
#: models_provider/impl/openai_model_provider/credential/llm.py:53
#: models_provider/impl/openai_model_provider/credential/stt.py:27
#: models_provider/impl/openai_model_provider/credential/tti.py:66
#: models_provider/impl/openai_model_provider/credential/tts.py:45
#: models_provider/impl/siliconCloud_model_provider/credential/embedding.py:31
#: models_provider/impl/siliconCloud_model_provider/credential/image.py:30
#: models_provider/impl/siliconCloud_model_provider/credential/llm.py:52
#: models_provider/impl/siliconCloud_model_provider/credential/reranker.py:32
#: models_provider/impl/siliconCloud_model_provider/credential/stt.py:27
#: models_provider/impl/siliconCloud_model_provider/credential/tti.py:66
#: models_provider/impl/siliconCloud_model_provider/credential/tts.py:27
#: models_provider/impl/tencent_cloud_model_provider/credential/llm.py:52
#: models_provider/impl/tencent_model_provider/credential/image.py:32
#: models_provider/impl/vllm_model_provider/credential/embedding.py:31
#: models_provider/impl/vllm_model_provider/credential/image.py:27
#: models_provider/impl/vllm_model_provider/credential/llm.py:65
#: models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:31
#: models_provider/impl/volcanic_engine_model_provider/credential/image.py:27
#: models_provider/impl/volcanic_engine_model_provider/credential/llm.py:52
#: models_provider/impl/volcanic_engine_model_provider/credential/stt.py:30
#: models_provider/impl/volcanic_engine_model_provider/credential/tti.py:46
#: models_provider/impl/volcanic_engine_model_provider/credential/tts.py:56
#: models_provider/impl/wenxin_model_provider/credential/llm.py:55
#: models_provider/impl/wenxin_model_provider/credential/llm.py:72
#: models_provider/impl/xf_model_provider/credential/image.py:34
#: models_provider/impl/xf_model_provider/credential/llm.py:71
#: models_provider/impl/xf_model_provider/credential/stt.py:29
#: models_provider/impl/xf_model_provider/credential/tts.py:52
#: models_provider/impl/xinference_model_provider/credential/embedding.py:40
#: models_provider/impl/xinference_model_provider/credential/image.py:27
#: models_provider/impl/xinference_model_provider/credential/llm.py:59
#: models_provider/impl/xinference_model_provider/credential/reranker.py:29
#: models_provider/impl/xinference_model_provider/credential/stt.py:26
#: models_provider/impl/xinference_model_provider/credential/tti.py:64
#: models_provider/impl/xinference_model_provider/credential/tts.py:44
#: models_provider/impl/zhipu_model_provider/credential/image.py:26
#: models_provider/impl/zhipu_model_provider/credential/llm.py:51
#: models_provider/impl/zhipu_model_provider/credential/tti.py:45
#, python-brace-format
msgid "{key} is required"
msgstr "{key} 是必填项"
#: models_provider/impl/anthropic_model_provider/credential/llm.py:32
#: models_provider/impl/aws_bedrock_model_provider/credential/llm.py:24
#: models_provider/impl/azure_model_provider/credential/llm.py:33
#: models_provider/impl/azure_model_provider/credential/llm.py:44
#: models_provider/impl/deepseek_model_provider/credential/llm.py:32
#: models_provider/impl/gemini_model_provider/credential/llm.py:32
#: models_provider/impl/kimi_model_provider/credential/llm.py:32
#: models_provider/impl/ollama_model_provider/credential/llm.py:30
#: models_provider/impl/openai_model_provider/credential/llm.py:33
#: models_provider/impl/siliconCloud_model_provider/credential/llm.py:32
#: models_provider/impl/tencent_cloud_model_provider/credential/llm.py:32
#: models_provider/impl/vllm_model_provider/credential/llm.py:25
#: models_provider/impl/volcanic_engine_model_provider/credential/llm.py:32
#: models_provider/impl/wenxin_model_provider/credential/llm.py:32
#: models_provider/impl/xf_model_provider/credential/llm.py:32
#: models_provider/impl/xf_model_provider/credential/llm.py:51
#: models_provider/impl/xinference_model_provider/credential/llm.py:25
#: models_provider/impl/zhipu_model_provider/credential/llm.py:32
msgid "Specify the maximum number of tokens that the model can generate"
msgstr "指定模型可以生成的最大 tokens 数"
#: models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:36
msgid ""
"An update to Claude 2 that doubles the context window and improves "
"reliability, hallucination rates, and evidence-based accuracy in long "
"documents and RAG contexts."
msgstr ""
"Claude 2 的更新,采用双倍的上下文窗口,并在长文档和 RAG 上下文中提高可靠性、"
"幻觉率和循证准确性。"
#: models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:43
msgid ""
"Anthropic is a powerful model that can handle a variety of tasks, from "
"complex dialogue and creative content generation to detailed command "
"obedience."
msgstr ""
"Anthropic 功能强大的模型,可处理各种任务,从复杂的对话和创意内容生成到详细的"
"指令服从。"
#: models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:50
msgid ""
"The Claude 3 Haiku is Anthropic's fastest and most compact model, with near-"
"instant responsiveness. The model can answer simple queries and requests "
"quickly. Customers will be able to build seamless AI experiences that mimic "
"human interactions. Claude 3 Haiku can process images and return text "
"output, and provides 200K context windows."
msgstr ""
"Claude 3 Haiku 是 Anthropic 最快速、最紧凑的模型,具有近乎即时的响应能力。该"
"模型可以快速回答简单的查询和请求。客户将能够构建模仿人类交互的无缝人工智能体"
"验。 Claude 3 Haiku 可以处理图像和返回文本输出,并且提供 200K 上下文窗口。"
#: models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:57
msgid ""
"The Claude 3 Sonnet model from Anthropic strikes the ideal balance between "
"intelligence and speed, especially when it comes to handling enterprise "
"workloads. This model offers maximum utility while being priced lower than "
"competing products, and it's been engineered to be a solid choice for "
"deploying AI at scale."
msgstr ""
"Anthropic 推出的 Claude 3 Sonnet 模型在智能和速度之间取得理想的平衡,尤其是在"
"处理企业工作负载方面。该模型提供最大的效用,同时价格低于竞争产品,并且其经过"
"精心设计,是大规模部署人工智能的可靠选择。"
#: models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:64
msgid ""
"The Claude 3.5 Sonnet raises the industry standard for intelligence, "
"outperforming competing models and the Claude 3 Opus in extensive "
"evaluations, with the speed and cost-effectiveness of our mid-range models."
msgstr ""
"Claude 3.5 Sonnet提高了智能的行业标准在广泛的评估中超越了竞争对手的型号和"
"Claude 3 Opus具有我们中端型号的速度和成本效益。"
#: models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:71
msgid ""
"A faster, more affordable but still very powerful model that can handle a "
"range of tasks including casual conversation, text analysis, summarization "
"and document question answering."
msgstr ""
"一种更快速、更实惠但仍然非常强大的模型,它可以处理一系列任务,包括随意对话、"
"文本分析、摘要和文档问题回答。"
#: models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:78
msgid ""
"Titan Text Premier is the most powerful and advanced model in the Titan Text "
"series, designed to deliver exceptional performance for a variety of "
"enterprise applications. With its cutting-edge features, it delivers greater "
"accuracy and outstanding results, making it an excellent choice for "
"organizations looking for a top-notch text processing solution."
msgstr ""
"Titan Text Premier 是 Titan Text 系列中功能强大且先进的型号,旨在为各种企业应"
"用程序提供卓越的性能。凭借其尖端功能,它提供了更高的准确性和出色的结果,使其"
"成为寻求一流文本处理解决方案的组织的绝佳选择。"
#: models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:85
msgid ""
"Amazon Titan Text Lite is a lightweight, efficient model ideal for fine-"
"tuning English-language tasks, including summarization and copywriting, "
"where customers require smaller, more cost-effective, and highly "
"customizable models."
msgstr ""
"Amazon Titan Text Lite 是一种轻量级的高效模型,非常适合英语任务的微调,包括摘"
"要和文案写作等,在这种场景下,客户需要更小、更经济高效且高度可定制的模型"
#: models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:91
msgid ""
"Amazon Titan Text Express has context lengths of up to 8,000 tokens, making "
"it ideal for a variety of high-level general language tasks, such as open-"
"ended text generation and conversational chat, as well as support in "
"retrieval-augmented generation (RAG). At launch, the model is optimized for "
"English, but other languages are supported."
msgstr ""
"Amazon Titan Text Express 的上下文长度长达 8000 个 tokens因而非常适合各种高"
"级常规语言任务例如开放式文本生成和对话式聊天以及检索增强生成RAG中的支"
"持。在发布时,该模型针对英语进行了优化,但也支持其他语言。"
#: models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:97
msgid ""
"7B dense converter for rapid deployment and easy customization. Small in "
"size yet powerful in a variety of use cases. Supports English and code, as "
"well as 32k context windows."
msgstr ""
"7B 密集型转换器,可快速部署,易于定制。体积虽小,但功能强大,适用于各种用例。"
"支持英语和代码,以及 32k 的上下文窗口。"
#: models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:103
msgid ""
"Advanced Mistral AI large-scale language model capable of handling any "
"language task, including complex multilingual reasoning, text understanding, "
"transformation, and code generation."
msgstr ""
"先进的 Mistral AI 大型语言模型,能够处理任何语言任务,包括复杂的多语言推理、"
"文本理解、转换和代码生成。"
#: models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:109
msgid ""
"Ideal for content creation, conversational AI, language understanding, R&D, "
"and enterprise applications"
msgstr "非常适合内容创作、会话式人工智能、语言理解、研发和企业应用"
#: models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:115
msgid ""
"Ideal for limited computing power and resources, edge devices, and faster "
"training times."
msgstr "非常适合有限的计算能力和资源、边缘设备和更快的训练时间。"
#: models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:123
msgid ""
"Titan Embed Text is the largest embedding model in the Amazon Titan Embed "
"series and can handle various text embedding tasks, such as text "
"classification, text similarity calculation, etc."
msgstr ""
"Titan Embed Text 是 Amazon Titan Embed 系列中最大的嵌入模型,可以处理各种文本"
"嵌入任务,如文本分类、文本相似度计算等。"
#: models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:28
#: models_provider/impl/aws_bedrock_model_provider/credential/llm.py:47
#, python-brace-format
msgid "The following fields are required: {keys}"
msgstr "以下字段是必填项: {keys}"
#: models_provider/impl/azure_model_provider/credential/embedding.py:44
#: models_provider/impl/azure_model_provider/credential/llm.py:76
msgid "Verification failed, please check whether the parameters are correct"
msgstr "认证失败,请检查参数是否正确"
#: models_provider/impl/azure_model_provider/credential/tti.py:28
#: models_provider/impl/openai_model_provider/credential/tti.py:29
#: models_provider/impl/siliconCloud_model_provider/credential/tti.py:29
#: models_provider/impl/xinference_model_provider/credential/tti.py:28
msgid "Picture quality"
msgstr "图片质量"
#: models_provider/impl/azure_model_provider/credential/tts.py:17
#: models_provider/impl/openai_model_provider/credential/tts.py:17
msgid ""
"Try out the different sounds (Alloy, Echo, Fable, Onyx, Nova, and Sparkle) "
"to find one that suits your desired tone and audience. The current voiceover "
"is optimized for English."
msgstr ""
"尝试不同的声音(合金、回声、寓言、缟玛瑙、新星和闪光),找到一种适合您所需的"
"音调和听众的声音。当前的语音针对英语进行了优化。"
#: models_provider/impl/deepseek_model_provider/deepseek_model_provider.py:24
msgid "Good at common conversational tasks, supports 32K contexts"
msgstr "擅长通用对话任务,支持 32K 上下文"
#: models_provider/impl/deepseek_model_provider/deepseek_model_provider.py:29
msgid "Good at handling programming tasks, supports 16K contexts"
msgstr "擅长处理编程任务,支持 16K 上下文"
#: models_provider/impl/gemini_model_provider/gemini_model_provider.py:32
msgid "Latest Gemini 1.0 Pro model, updated with Google update"
msgstr "最新的 Gemini 1.0 Pro 模型,更新了 Google 更新"
#: models_provider/impl/gemini_model_provider/gemini_model_provider.py:36
msgid "Latest Gemini 1.0 Pro Vision model, updated with Google update"
msgstr "最新的Gemini 1.0 Pro Vision模型随Google更新而更新"
#: models_provider/impl/gemini_model_provider/gemini_model_provider.py:43
#: models_provider/impl/gemini_model_provider/gemini_model_provider.py:47
#: models_provider/impl/gemini_model_provider/gemini_model_provider.py:54
#: models_provider/impl/gemini_model_provider/gemini_model_provider.py:58
msgid "Latest Gemini 1.5 Flash model, updated with Google updates"
msgstr "最新的Gemini 1.5 Flash模型随Google更新而更新"
#: models_provider/impl/gemini_model_provider/model/stt.py:53
msgid "convert audio to text"
msgstr "将音频转换为文本"
#: models_provider/impl/local_model_provider/credential/embedding.py:53
#: models_provider/impl/local_model_provider/credential/reranker.py:54
msgid "Model catalog"
msgstr "模型目录"
#: models_provider/impl/local_model_provider/local_model_provider.py:39
msgid "local model"
msgstr "本地模型"
#: models_provider/impl/ollama_model_provider/credential/embedding.py:30
#: models_provider/impl/ollama_model_provider/credential/image.py:23
#: models_provider/impl/ollama_model_provider/credential/llm.py:48
#: models_provider/impl/ollama_model_provider/credential/reranker.py:35
#: models_provider/impl/vllm_model_provider/credential/llm.py:43
#: models_provider/impl/xinference_model_provider/credential/embedding.py:24
#: models_provider/impl/xinference_model_provider/credential/llm.py:44
msgid "API domain name is invalid"
msgstr "API 域名无效"
#: models_provider/impl/ollama_model_provider/credential/embedding.py:35
#: models_provider/impl/ollama_model_provider/credential/image.py:28
#: models_provider/impl/ollama_model_provider/credential/llm.py:53
#: models_provider/impl/ollama_model_provider/credential/reranker.py:40
#: models_provider/impl/vllm_model_provider/credential/llm.py:47
#: models_provider/impl/xinference_model_provider/credential/embedding.py:30
#: models_provider/impl/xinference_model_provider/credential/llm.py:48
msgid "The model does not exist, please download the model first"
msgstr "模型不存在,请先下载模型"
#: models_provider/impl/ollama_model_provider/ollama_model_provider.py:56
msgid ""
"Llama 2 is a set of pretrained and fine-tuned generative text models ranging "
"in size from 7 billion to 70 billion. This is a repository of 7B pretrained "
"models. Links to other models can be found in the index at the bottom."
msgstr ""
"Llama 2 是一组经过预训练和微调的生成文本模型,其规模从 70 亿到 700 亿个不等。"
"这是 7B 预训练模型的存储库。其他模型的链接可以在底部的索引中找到。"
#: models_provider/impl/ollama_model_provider/ollama_model_provider.py:60
msgid ""
"Llama 2 is a set of pretrained and fine-tuned generative text models ranging "
"in size from 7 billion to 70 billion. This is a repository of 13B pretrained "
"models. Links to other models can be found in the index at the bottom."
msgstr ""
"Llama 2 是一组经过预训练和微调的生成文本模型,其规模从 70 亿到 700 亿个不等。"
"这是 13B 预训练模型的存储库。其他模型的链接可以在底部的索引中找到。"
#: models_provider/impl/ollama_model_provider/ollama_model_provider.py:64
msgid ""
"Llama 2 is a set of pretrained and fine-tuned generative text models ranging "
"in size from 7 billion to 70 billion. This is a repository of 70B pretrained "
"models. Links to other models can be found in the index at the bottom."
msgstr ""
"Llama 2 是一组经过预训练和微调的生成文本模型,其规模从 70 亿到 700 亿个不等。"
"这是 70B 预训练模型的存储库。其他模型的链接可以在底部的索引中找到。"
#: models_provider/impl/ollama_model_provider/ollama_model_provider.py:68
msgid ""
"Since the Chinese alignment of Llama2 itself is weak, we use the Chinese "
"instruction set to fine-tune meta-llama/Llama-2-13b-chat-hf with LoRA so "
"that it has strong Chinese conversation capabilities."
msgstr ""
"由于Llama2本身的中文对齐较弱我们采用中文指令集对meta-llama/Llama-2-13b-"
"chat-hf进行LoRA微调使其具备较强的中文对话能力。"
#: models_provider/impl/ollama_model_provider/ollama_model_provider.py:72
msgid ""
"Meta Llama 3: The most capable public product LLM to date. 8 billion "
"parameters."
msgstr "Meta Llama 3迄今为止最有能力的公开产品LLM。80亿参数。"
#: models_provider/impl/ollama_model_provider/ollama_model_provider.py:76
msgid ""
"Meta Llama 3: The most capable public product LLM to date. 70 billion "
"parameters."
msgstr "Meta Llama 3迄今为止最有能力的公开产品LLM。700亿参数。"
#: models_provider/impl/ollama_model_provider/ollama_model_provider.py:80
msgid ""
"Compared with previous versions, qwen 1.5 0.5b has significantly enhanced "
"the model's alignment with human preferences and its multi-language "
"processing capabilities. Models of all sizes support a context length of "
"32768 tokens. 500 million parameters."
msgstr ""
"qwen 1.5 0.5b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有"
"显著增强。所有规模的模型都支持32768个tokens的上下文长度。5亿参数。"
#: models_provider/impl/ollama_model_provider/ollama_model_provider.py:84
msgid ""
"Compared with previous versions, qwen 1.5 1.8b has significantly enhanced "
"the model's alignment with human preferences and its multi-language "
"processing capabilities. Models of all sizes support a context length of "
"32768 tokens. 1.8 billion parameters."
msgstr ""
"qwen 1.5 1.8b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有"
"显著增强。所有规模的模型都支持32768个tokens的上下文长度。18亿参数。"
#: models_provider/impl/ollama_model_provider/ollama_model_provider.py:88
msgid ""
"Compared with previous versions, qwen 1.5 4b has significantly enhanced the "
"model's alignment with human preferences and its multi-language processing "
"capabilities. Models of all sizes support a context length of 32768 tokens. "
"4 billion parameters."
msgstr ""
"qwen 1.5 4b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显"
"著增强。所有规模的模型都支持32768个tokens的上下文长度。40亿参数。"
#: models_provider/impl/ollama_model_provider/ollama_model_provider.py:93
msgid ""
"Compared with previous versions, qwen 1.5 7b has significantly enhanced the "
"model's alignment with human preferences and its multi-language processing "
"capabilities. Models of all sizes support a context length of 32768 tokens. "
"7 billion parameters."
msgstr ""
"qwen 1.5 7b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显"
"著增强。所有规模的模型都支持32768个tokens的上下文长度。70亿参数。"
#: models_provider/impl/ollama_model_provider/ollama_model_provider.py:97
msgid ""
"Compared with previous versions, qwen 1.5 14b has significantly enhanced the "
"model's alignment with human preferences and its multi-language processing "
"capabilities. Models of all sizes support a context length of 32768 tokens. "
"14 billion parameters."
msgstr ""
"qwen 1.5 14b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显"
"著增强。所有规模的模型都支持32768个tokens的上下文长度。140亿参数。"
#: models_provider/impl/ollama_model_provider/ollama_model_provider.py:101
msgid ""
"Compared with previous versions, qwen 1.5 32b has significantly enhanced the "
"model's alignment with human preferences and its multi-language processing "
"capabilities. Models of all sizes support a context length of 32768 tokens. "
"32 billion parameters."
msgstr ""
"qwen 1.5 32b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显"
"著增强。所有规模的模型都支持32768个tokens的上下文长度。320亿参数。"
#: models_provider/impl/ollama_model_provider/ollama_model_provider.py:105
msgid ""
"Compared with previous versions, qwen 1.5 72b has significantly enhanced the "
"model's alignment with human preferences and its multi-language processing "
"capabilities. Models of all sizes support a context length of 32768 tokens. "
"72 billion parameters."
msgstr ""
"qwen 1.5 72b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显"
"著增强。所有规模的模型都支持32768个tokens的上下文长度。720亿参数。"
#: models_provider/impl/ollama_model_provider/ollama_model_provider.py:109
msgid ""
"Compared with previous versions, qwen 1.5 110b has significantly enhanced "
"the model's alignment with human preferences and its multi-language "
"processing capabilities. Models of all sizes support a context length of "
"32768 tokens. 110 billion parameters."
msgstr ""
"qwen 1.5 110b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有"
"显著增强。所有规模的模型都支持32768个tokens的上下文长度。1100亿参数。"
#: models_provider/impl/ollama_model_provider/ollama_model_provider.py:153
#: models_provider/impl/ollama_model_provider/ollama_model_provider.py:193
msgid ""
"Phi-3 Mini is Microsoft's 3.8B parameter, lightweight, state-of-the-art open "
"model."
msgstr "Phi-3 Mini是Microsoft的3.8B参数,轻量级,最先进的开放模型。"
#: models_provider/impl/ollama_model_provider/ollama_model_provider.py:162
#: models_provider/impl/ollama_model_provider/ollama_model_provider.py:197
msgid ""
"A high-performance open embedding model with a large token context window."
msgstr "一个具有大 tokens上下文窗口的高性能开放嵌入模型。"
#: models_provider/impl/openai_model_provider/credential/tti.py:16
#: models_provider/impl/siliconCloud_model_provider/credential/tti.py:16
msgid ""
"The image generation endpoint allows you to create raw images based on text "
"prompts. When using the DALL·E 3, the image size can be 1024x1024, 1024x1792 "
"or 1792x1024 pixels."
msgstr ""
"图像生成端点允许您根据文本提示创建原始图像。使用 DALL·E 3 时,图像的尺寸可以"
"为 1024x1024、1024x1792 或 1792x1024 像素。"
#: models_provider/impl/openai_model_provider/credential/tti.py:29
#: models_provider/impl/siliconCloud_model_provider/credential/tti.py:29
msgid ""
" \n"
"By default, images are produced in standard quality, but with DALL·E 3 you "
"can set quality: \"hd\" to enhance detail. Square, standard quality images "
"are generated fastest.\n"
" "
msgstr ""
"默认情况下,图像以标准质量生成,但使用 DALL·E 3 时您可以设置质量“hd”以增"
"强细节。方形、标准质量的图像生成速度最快。"
#: models_provider/impl/openai_model_provider/credential/tti.py:44
#: models_provider/impl/siliconCloud_model_provider/credential/tti.py:44
msgid ""
"You can use DALL·E 3 to request 1 image at a time (requesting more images by "
"issuing parallel requests), or use DALL·E 2 with the n parameter to request "
"up to 10 images at a time."
msgstr ""
"您可以使用 DALL·E 3 一次请求 1 个图像(通过发出并行请求来请求更多图像),或者"
"使用带有 n 参数的 DALL·E 2 一次最多请求 10 个图像。"
#: models_provider/impl/openai_model_provider/openai_model_provider.py:35
#: models_provider/impl/openai_model_provider/openai_model_provider.py:119
#: models_provider/impl/siliconCloud_model_provider/siliconCloud_model_provider.py:118
msgid "The latest gpt-3.5-turbo, updated with OpenAI adjustments"
msgstr "最新的gpt-3.5-turbo随OpenAI调整而更新"
#: models_provider/impl/openai_model_provider/openai_model_provider.py:38
msgid "Latest gpt-4, updated with OpenAI adjustments"
msgstr "最新的gpt-4随OpenAI调整而更新"
#: models_provider/impl/openai_model_provider/openai_model_provider.py:40
#: models_provider/impl/openai_model_provider/openai_model_provider.py:99
msgid ""
"The latest GPT-4o, cheaper and faster than gpt-4-turbo, updated with OpenAI "
"adjustments"
msgstr "最新的GPT-4o比gpt-4-turbo更便宜、更快随OpenAI调整而更新"
#: models_provider/impl/openai_model_provider/openai_model_provider.py:43
#: models_provider/impl/openai_model_provider/openai_model_provider.py:102
msgid ""
"The latest gpt-4o-mini, cheaper and faster than gpt-4o, updated with OpenAI "
"adjustments"
msgstr "最新的gpt-4o-mini比gpt-4o更便宜、更快随OpenAI调整而更新"
#: models_provider/impl/openai_model_provider/openai_model_provider.py:46
msgid "The latest gpt-4-turbo, updated with OpenAI adjustments"
msgstr "最新的gpt-4-turbo随OpenAI调整而更新"
#: models_provider/impl/openai_model_provider/openai_model_provider.py:49
msgid "The latest gpt-4-turbo-preview, updated with OpenAI adjustments"
msgstr "最新的gpt-4-turbo-preview随OpenAI调整而更新"
#: models_provider/impl/openai_model_provider/openai_model_provider.py:53
msgid ""
"gpt-3.5-turbo snapshot on January 25, 2024, supporting context length 16,385 "
"tokens"
msgstr "2024年1月25日的gpt-3.5-turbo快照支持上下文长度16,385 tokens"
#: models_provider/impl/openai_model_provider/openai_model_provider.py:57
msgid ""
"gpt-3.5-turbo snapshot on November 6, 2023, supporting context length 16,385 "
"tokens"
msgstr "2023年11月6日的gpt-3.5-turbo快照支持上下文长度16,385 tokens"
#: models_provider/impl/openai_model_provider/openai_model_provider.py:61
msgid ""
"[Legacy] gpt-3.5-turbo snapshot on June 13, 2023, will be deprecated on June "
"13, 2024"
msgstr "[Legacy] 2023年6月13日的gpt-3.5-turbo快照将于2024年6月13日弃用"
#: models_provider/impl/openai_model_provider/openai_model_provider.py:65
msgid ""
"gpt-4o snapshot on May 13, 2024, supporting context length 128,000 tokens"
msgstr "2024年5月13日的gpt-4o快照支持上下文长度128,000 tokens"
#: models_provider/impl/openai_model_provider/openai_model_provider.py:69
msgid ""
"gpt-4-turbo snapshot on April 9, 2024, supporting context length 128,000 "
"tokens"
msgstr "2024年4月9日的gpt-4-turbo快照支持上下文长度128,000 tokens"
#: models_provider/impl/openai_model_provider/openai_model_provider.py:72
msgid ""
"gpt-4-turbo snapshot on January 25, 2024, supporting context length 128,000 "
"tokens"
msgstr "2024年1月25日的gpt-4-turbo快照支持上下文长度128,000 tokens"
#: models_provider/impl/openai_model_provider/openai_model_provider.py:75
msgid ""
"gpt-4-turbo snapshot on November 6, 2023, supporting context length 128,000 "
"tokens"
msgstr "2023年11月6日的gpt-4-turbo快照支持上下文长度128,000 tokens"
#: models_provider/impl/tencent_cloud_model_provider/tencent_cloud_model_provider.py:58
msgid "Tencent Cloud"
msgstr "腾讯云"
#: models_provider/impl/tencent_model_provider/credential/llm.py:41
#: models_provider/impl/tencent_model_provider/credential/tti.py:88
#, python-brace-format
msgid "{keys} is required"
msgstr "{keys} 是必填项"
#: models_provider/impl/tencent_model_provider/credential/tti.py:14
msgid "painting style"
msgstr "绘画风格"
#: models_provider/impl/tencent_model_provider/credential/tti.py:14
msgid "If not passed, the default value is 201 (Japanese anime style)"
msgstr "如果未传递则默认值为201日本动漫风格"
#: models_provider/impl/tencent_model_provider/credential/tti.py:18
msgid "Not limited to style"
msgstr "不限于风格"
#: models_provider/impl/tencent_model_provider/credential/tti.py:19
msgid "ink painting"
msgstr "水墨画"
#: models_provider/impl/tencent_model_provider/credential/tti.py:20
msgid "concept art"
msgstr "概念艺术"
#: models_provider/impl/tencent_model_provider/credential/tti.py:21
msgid "Oil painting 1"
msgstr "油画1"
#: models_provider/impl/tencent_model_provider/credential/tti.py:22
msgid "Oil Painting 2 (Van Gogh)"
msgstr "油画2梵高"
#: models_provider/impl/tencent_model_provider/credential/tti.py:23
msgid "watercolor painting"
msgstr "水彩画"
#: models_provider/impl/tencent_model_provider/credential/tti.py:24
msgid "pixel art"
msgstr "像素画"
#: models_provider/impl/tencent_model_provider/credential/tti.py:25
msgid "impasto style"
msgstr "厚涂风格"
#: models_provider/impl/tencent_model_provider/credential/tti.py:26
msgid "illustration"
msgstr "插图"
#: models_provider/impl/tencent_model_provider/credential/tti.py:27
msgid "paper cut style"
msgstr "剪纸风格"
#: models_provider/impl/tencent_model_provider/credential/tti.py:28
msgid "Impressionism 1 (Monet)"
msgstr "印象派1莫奈"
#: models_provider/impl/tencent_model_provider/credential/tti.py:29
msgid "Impressionism 2"
msgstr "印象派2"
#: models_provider/impl/tencent_model_provider/credential/tti.py:31
msgid "classical portraiture"
msgstr "古典肖像画"
#: models_provider/impl/tencent_model_provider/credential/tti.py:32
msgid "black and white sketch"
msgstr "黑白素描画"
#: models_provider/impl/tencent_model_provider/credential/tti.py:33
msgid "cyberpunk"
msgstr "赛博朋克"
#: models_provider/impl/tencent_model_provider/credential/tti.py:34
msgid "science fiction style"
msgstr "科幻风格"
#: models_provider/impl/tencent_model_provider/credential/tti.py:35
msgid "dark style"
msgstr "暗黑风格"
#: models_provider/impl/tencent_model_provider/credential/tti.py:37
msgid "vaporwave"
msgstr "蒸汽波"
#: models_provider/impl/tencent_model_provider/credential/tti.py:38
msgid "Japanese animation"
msgstr "日系动漫"
#: models_provider/impl/tencent_model_provider/credential/tti.py:39
msgid "monster style"
msgstr "怪兽风格"
#: models_provider/impl/tencent_model_provider/credential/tti.py:40
msgid "Beautiful ancient style"
msgstr "唯美古风"
#: models_provider/impl/tencent_model_provider/credential/tti.py:41
msgid "retro anime"
msgstr "复古动漫"
#: models_provider/impl/tencent_model_provider/credential/tti.py:42
msgid "Game cartoon hand drawing"
msgstr "游戏卡通手绘"
#: models_provider/impl/tencent_model_provider/credential/tti.py:43
msgid "Universal realistic style"
msgstr "通用写实风格"
#: models_provider/impl/tencent_model_provider/credential/tti.py:50
msgid "Generate image resolution"
msgstr "生成图像分辨率"
#: models_provider/impl/tencent_model_provider/credential/tti.py:50
msgid "If not transmitted, the default value is 768:768."
msgstr "不传默认使用768:768。"
#: models_provider/impl/tencent_model_provider/tencent_model_provider.py:38
msgid ""
"The most effective version of the current hybrid model, the trillion-level "
"parameter scale MOE-32K long article model. Reaching the absolute leading "
"level on various benchmarks, with complex instructions and reasoning, "
"complex mathematical capabilities, support for function call, and "
"application focus optimization in fields such as multi-language translation, "
"finance, law, and medical care"
msgstr ""
"当前混元模型中效果最优版本,万亿级参数规模 MOE-32K 长文模型。在各种 "
"benchmark 上达到绝对领先的水平,复杂指令和推理,具备复杂数学能力,支持 "
"functioncall在多语言翻译、金融法律医疗等领域应用重点优化"
#: models_provider/impl/tencent_model_provider/tencent_model_provider.py:45
msgid ""
"A better routing strategy is adopted to simultaneously alleviate the "
"problems of load balancing and expert convergence. For long articles, the "
"needle-in-a-haystack index reaches 99.9%"
msgstr ""
"采用更优的路由策略,同时缓解了负载均衡和专家趋同的问题。长文方面,大海捞针指"
"标达到99.9%"
#: models_provider/impl/tencent_model_provider/tencent_model_provider.py:51
msgid ""
"Upgraded to MOE structure, the context window is 256k, leading many open "
"source models in multiple evaluation sets such as NLP, code, mathematics, "
"industry, etc."
msgstr ""
"升级为 MOE 结构,上下文窗口为 256k ,在 NLP代码数学行业等多项评测集上领"
"先众多开源模型"
#: models_provider/impl/tencent_model_provider/tencent_model_provider.py:57
msgid ""
"Hunyuan's latest version of the role-playing model, a role-playing model "
"launched by Hunyuan's official fine-tuning training, is based on the Hunyuan "
"model combined with the role-playing scene data set for additional training, "
"and has better basic effects in role-playing scenes."
msgstr ""
"混元最新版角色扮演模型,混元官方精调训练推出的角色扮演模型,基于混元模型结合"
"角色扮演场景数据集进行增训,在角色扮演场景具有更好的基础效果"
#: models_provider/impl/tencent_model_provider/tencent_model_provider.py:63
msgid ""
"Hunyuan's latest MOE architecture FunctionCall model has been trained with "
"high-quality FunctionCall data and has a context window of 32K, leading in "
"multiple dimensions of evaluation indicators."
msgstr ""
"混元最新 MOE 架构 FunctionCall 模型,经过高质量的 FunctionCall 数据训练,上下"
"文窗口达 32K在多个维度的评测指标上处于领先。"
#: models_provider/impl/tencent_model_provider/tencent_model_provider.py:69
msgid ""
"Hunyuan's latest code generation model, after training the base model with "
"200B high-quality code data, and iterating on high-quality SFT data for half "
"a year, the context long window length has been increased to 8K, and it "
"ranks among the top in the automatic evaluation indicators of code "
"generation in the five major languages; the five major languages In the "
"manual high-quality evaluation of 10 comprehensive code tasks that consider "
"all aspects, the performance is in the first echelon."
msgstr ""
"混元最新代码生成模型,经过 200B 高质量代码数据增训基座模型,迭代半年高质量 "
"SFT 数据训练,上下文长窗口长度增大到 8K五大语言代码生成自动评测指标上位居前"
"列五大语言10项考量各方面综合代码任务人工高质量评测上性能处于第一梯队"
#: models_provider/impl/tencent_model_provider/tencent_model_provider.py:77
msgid ""
"Tencent's Hunyuan Embedding interface can convert text into high-quality "
"vector data. The vector dimension is 1024 dimensions."
msgstr ""
"腾讯混元 Embedding 接口可以将文本转化为高质量的向量数据。向量维度为1024维。"
#: models_provider/impl/tencent_model_provider/tencent_model_provider.py:87
msgid "Mixed element visual model"
msgstr "混元视觉模型"
#: models_provider/impl/tencent_model_provider/tencent_model_provider.py:94
msgid "Hunyuan graph model"
msgstr "混元生图模型"
#: models_provider/impl/tencent_model_provider/tencent_model_provider.py:125
msgid "Tencent Hunyuan"
msgstr "腾讯混元"
#: models_provider/impl/vllm_model_provider/vllm_model_provider.py:24
#: models_provider/impl/vllm_model_provider/vllm_model_provider.py:42
msgid "Facebooks 125M parameter model"
msgstr "Facebook的125M参数模型"
#: models_provider/impl/vllm_model_provider/vllm_model_provider.py:25
msgid "BAAIs 7B parameter model"
msgstr "BAAI的7B参数模型"
#: models_provider/impl/vllm_model_provider/vllm_model_provider.py:26
msgid "BAAIs 13B parameter mode"
msgstr "BAAI的13B参数模型"
#: models_provider/impl/volcanic_engine_model_provider/credential/tti.py:16
msgid ""
"If the gap between width, height and 512 is too large, the picture rendering "
"effect will be poor and the probability of excessive delay will increase "
"significantly. Recommended ratio and corresponding width and height before "
"super score: width*height"
msgstr ""
"宽、高与512差距过大则出图效果不佳、延迟过长概率显著增加。超分前建议比例及对"
"应宽高width*height"
#: models_provider/impl/volcanic_engine_model_provider/credential/tts.py:15
#: models_provider/impl/xinference_model_provider/credential/tts.py:15
msgid "timbre"
msgstr "音色"
#: models_provider/impl/volcanic_engine_model_provider/credential/tts.py:31
#: models_provider/impl/xf_model_provider/credential/tts.py:28
#| msgid "Speaking speed"
msgid "speaking speed"
msgstr "语速"
#: models_provider/impl/volcanic_engine_model_provider/credential/tts.py:31
msgid "[0.2,3], the default is 1, usually one decimal place is enough"
msgstr "[0.2,3]默认为1通常保留一位小数即可"
#: models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:39
#: models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:44
#: models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:88
msgid ""
"The user goes to the model inference page of Volcano Ark to create an "
"inference access point. Here, you need to enter ep-xxxxxxxxxx-yyyy to call "
"it."
msgstr ""
"用户前往火山方舟的模型推理页面创建推理接入点这里需要输入ep-xxxxxxxxxx-yyyy"
"进行调用"
#: models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:59
msgid "Universal 2.0-Vincent Diagram"
msgstr "通用2.0-文生图"
#: models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:64
msgid "Universal 2.0Pro-Vincent Chart"
msgstr "通用2.0Pro-文生图"
#: models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:69
msgid "Universal 1.4-Vincent Chart"
msgstr "通用1.4-文生图"
#: models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:74
msgid "Animation 1.3.0-Vincent Picture"
msgstr "动漫1.3.0-文生图"
#: models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:79
msgid "Animation 1.3.1-Vincent Picture"
msgstr "动漫1.3.1-文生图"
#: models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:113
msgid "volcano engine"
msgstr "火山引擎"
#: models_provider/impl/wenxin_model_provider/credential/llm.py:51
#, python-brace-format
msgid "{model_name} The model does not support"
msgstr "{model_name} 模型不支持"
#: models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:24
#: models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:53
msgid ""
"ERNIE-Bot-4 is a large language model independently developed by Baidu. It "
"covers massive Chinese data and has stronger capabilities in dialogue Q&A, "
"content creation and generation."
msgstr ""
"ERNIE-Bot-4是百度自行研发的大语言模型覆盖海量中文数据具有更强的对话问答、"
"内容创作生成等能力。"
#: models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:27
msgid ""
"ERNIE-Bot is a large language model independently developed by Baidu. It "
"covers massive Chinese data and has stronger capabilities in dialogue Q&A, "
"content creation and generation."
msgstr ""
"ERNIE-Bot是百度自行研发的大语言模型覆盖海量中文数据具有更强的对话问答、内"
"容创作生成等能力。"
#: models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:30
msgid ""
"ERNIE-Bot-turbo is a large language model independently developed by Baidu. "
"It covers massive Chinese data, has stronger capabilities in dialogue Q&A, "
"content creation and generation, and has a faster response speed."
msgstr ""
"ERNIE-Bot-turbo是百度自行研发的大语言模型覆盖海量中文数据具有更强的对话问"
"答、内容创作生成等能力,响应速度更快。"
#: models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:33
msgid ""
"BLOOMZ-7B is a well-known large language model in the industry. It was "
"developed and open sourced by BigScience and can output text in 46 languages "
"and 13 programming languages."
msgstr ""
"BLOOMZ-7B是业内知名的大语言模型由BigScience研发并开源能够以46种语言和13种"
"编程语言输出文本。"
#: models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:39
msgid ""
"Llama-2-13b-chat was developed by Meta AI and is open source. It performs "
"well in scenarios such as coding, reasoning and knowledge application. "
"Llama-2-13b-chat is a native open source version with balanced performance "
"and effect, suitable for conversation scenarios."
msgstr ""
"Llama-2-13b-chat由Meta AI研发并开源在编码、推理及知识应用等场景表现优秀"
"Llama-2-13b-chat是性能与效果均衡的原生开源版本适用于对话场景。"
#: models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:42
msgid ""
"Llama-2-70b-chat was developed by Meta AI and is open source. It performs "
"well in scenarios such as coding, reasoning, and knowledge application. "
"Llama-2-70b-chat is a native open source version with high-precision effects."
msgstr ""
"Llama-2-70b-chat由Meta AI研发并开源在编码、推理及知识应用等场景表现优秀"
"Llama-2-70b-chat是高精度效果的原生开源版本。"
#: models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:45
msgid ""
"The Chinese enhanced version developed by the Qianfan team based on "
"Llama-2-7b has performed well on Chinese knowledge bases such as CMMLU and C-"
"EVAL."
msgstr ""
"千帆团队在Llama-2-7b基础上的中文增强版本在CMMLU、C-EVAL等中文知识库上表现优"
"异。"
#: models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:49
msgid ""
"Embedding-V1 is a text representation model based on Baidu Wenxin large "
"model technology. It can convert text into a vector form represented by "
"numerical values and can be used in text retrieval, information "
"recommendation, knowledge mining and other scenarios. Embedding-V1 provides "
"the Embeddings interface, which can generate corresponding vector "
"representations based on input content. You can call this interface to input "
"text into the model and obtain the corresponding vector representation for "
"subsequent text processing and analysis."
msgstr ""
"Embedding-V1是一个基于百度文心大模型技术的文本表示模型可以将文本转化为用数"
"值表示的向量形式,用于文本检索、信息推荐、知识挖掘等场景。 Embedding-V1提供了"
"Embeddings接口可以根据输入内容生成对应的向量表示。您可以通过调用该接口将"
"文本输入到模型中,获取到对应的向量表示,从而进行后续的文本处理和分析。"
#: models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:66
msgid "Thousand sails large model"
msgstr "千帆大模型"
#: models_provider/impl/xf_model_provider/credential/image.py:42
msgid "Please outline this picture"
msgstr "请描述这张图片"
#: models_provider/impl/xf_model_provider/credential/tts.py:15
msgid "Speaker"
msgstr "发音人"
#: models_provider/impl/xf_model_provider/credential/tts.py:16
msgid ""
"Speaker, optional value: Please go to the console to add a trial or purchase "
"speaker. After adding, the speaker parameter value will be displayed."
msgstr ""
"发音人,可选值:请到控制台添加试用或购买发音人,添加后即显示发音人参数值"
#: models_provider/impl/xf_model_provider/credential/tts.py:21
msgid "iFlytek Xiaoyan"
msgstr "讯飞小燕"
#: models_provider/impl/xf_model_provider/credential/tts.py:22
msgid "iFlytek Xujiu"
msgstr "讯飞许久"
#: models_provider/impl/xf_model_provider/credential/tts.py:23
msgid "iFlytek Xiaoping"
msgstr "讯飞小萍"
#: models_provider/impl/xf_model_provider/credential/tts.py:24
msgid "iFlytek Xiaojing"
msgstr "讯飞小婧"
#: models_provider/impl/xf_model_provider/credential/tts.py:25
msgid "iFlytek Xuxiaobao"
msgstr "讯飞许小宝"
#: models_provider/impl/xf_model_provider/credential/tts.py:28
msgid "Speech speed, optional value: [0-100], default is 50"
msgstr "语速,可选值:[0-100]默认为50"
#: models_provider/impl/xf_model_provider/xf_model_provider.py:39
#: models_provider/impl/xf_model_provider/xf_model_provider.py:50
msgid "Chinese and English recognition"
msgstr "中英文识别"
#: models_provider/impl/xf_model_provider/xf_model_provider.py:66
msgid "iFlytek Spark"
msgstr "讯飞星火"
#: models_provider/impl/xinference_model_provider/credential/tti.py:15
msgid ""
"The image generation endpoint allows you to create raw images based on text "
"prompts. The dimensions of the image can be 1024x1024, 1024x1792, or "
"1792x1024 pixels."
msgstr ""
"图像生成端点允许您根据文本提示创建原始图像。图像的尺寸可以为 1024x1024、"
"1024x1792 或 1792x1024 像素。"
#: models_provider/impl/xinference_model_provider/credential/tti.py:29
msgid ""
"By default, images are generated in standard quality, you can set quality: "
"\"hd\" to enhance detail. Square, standard quality images are generated "
"fastest."
msgstr ""
"默认情况下图像以标准质量生成您可以设置质量“hd”以增强细节。方形、标准质"
"量的图像生成速度最快。"
#: models_provider/impl/xinference_model_provider/credential/tti.py:42
msgid ""
"You can request 1 image at a time (requesting more images by making parallel "
"requests), or up to 10 images at a time using the n parameter."
msgstr ""
"您可以一次请求 1 个图像(通过发出并行请求来请求更多图像),或者使用 n 参数一"
"次最多请求 10 个图像。"
#: models_provider/impl/xinference_model_provider/credential/tts.py:20
msgid "Chinese female"
msgstr "中文女"
#: models_provider/impl/xinference_model_provider/credential/tts.py:21
msgid "Chinese male"
msgstr "中文男"
#: models_provider/impl/xinference_model_provider/credential/tts.py:22
msgid "Japanese male"
msgstr "日语男"
#: models_provider/impl/xinference_model_provider/credential/tts.py:23
msgid "Cantonese female"
msgstr "粤语女"
#: models_provider/impl/xinference_model_provider/credential/tts.py:24
msgid "English female"
msgstr "英文女"
#: models_provider/impl/xinference_model_provider/credential/tts.py:25
msgid "English male"
msgstr "英文男"
#: models_provider/impl/xinference_model_provider/credential/tts.py:26
msgid "Korean female"
msgstr "韩语女"
#: models_provider/impl/xinference_model_provider/xinference_model_provider.py:37
msgid ""
"Code Llama is a language model specifically designed for code generation."
msgstr "Code Llama 是一个专门用于代码生成的语言模型。"
#: models_provider/impl/xinference_model_provider/xinference_model_provider.py:44
msgid ""
" \n"
"Code Llama Instruct is a fine-tuned version of Code Llama's instructions, "
"designed to perform specific tasks.\n"
" "
msgstr ""
"Code Llama Instruct 是 Code Llama 的指令微调版本,专为执行特定任务而设计。"
#: models_provider/impl/xinference_model_provider/xinference_model_provider.py:53
msgid ""
"Code Llama Python is a language model specifically designed for Python code "
"generation."
msgstr "Code Llama Python 是一个专门用于 Python 代码生成的语言模型。"
#: models_provider/impl/xinference_model_provider/xinference_model_provider.py:60
msgid ""
"CodeQwen 1.5 is a language model for code generation with high performance."
msgstr "CodeQwen 1.5 是一个用于代码生成的语言模型,具有较高的性能。"
#: models_provider/impl/xinference_model_provider/xinference_model_provider.py:67
msgid "CodeQwen 1.5 Chat is a chat model version of CodeQwen 1.5."
msgstr "CodeQwen 1.5 Chat 是一个聊天模型版本的 CodeQwen 1.5。"
#: models_provider/impl/xinference_model_provider/xinference_model_provider.py:74
msgid "Deepseek is a large-scale language model with 13 billion parameters."
msgstr "Deepseek Chat 是一个聊天模型版本的 Deepseek。"
#: models_provider/impl/zhipu_model_provider/credential/tti.py:16
msgid ""
"Image size, only cogview-3-plus supports this parameter. Optional range: "
"[1024x1024,768x1344,864x1152,1344x768,1152x864,1440x720,720x1440], the "
"default is 1024x1024."
msgstr ""
"图片尺寸,仅 cogview-3-plus 支持该参数。可选范围:"
"[1024x1024,768x1344,864x1152,1344x768,1152x864,1440x720,720x1440],默认是"
"1024x1024。"
#: models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:34
msgid ""
"Have strong multi-modal understanding capabilities. Able to understand up to "
"five images simultaneously and supports video content understanding"
msgstr "具有强大的多模态理解能力。能够同时理解多达五张图像,并支持视频内容理解"
#: models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:37
msgid ""
"Focus on single picture understanding. Suitable for scenarios requiring "
"efficient image analysis"
msgstr "专注于单图理解。适用于需要高效图像解析的场景"
#: models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:40
msgid ""
"Focus on single picture understanding. Suitable for scenarios requiring "
"efficient image analysis (free)"
msgstr "专注于单图理解。适用于需要高效图像解析的场景(免费)"
#: models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:46
msgid ""
"Quickly and accurately generate images based on user text descriptions. "
"Resolution supports 1024x1024"
msgstr "根据用户文字描述快速、精准生成图像。分辨率支持1024x1024"
#: models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:49
msgid ""
"Generate high-quality images based on user text descriptions, supporting "
"multiple image sizes"
msgstr "根据用户文字描述生成高质量图像,支持多图片尺寸"
#: models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:52
msgid ""
"Generate high-quality images based on user text descriptions, supporting "
"multiple image sizes (free)"
msgstr "根据用户文字描述生成高质量图像,支持多图片尺寸(免费)"
#: models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:75
msgid "zhipu AI"
msgstr "智谱 AI"
#: models_provider/serializers/model_serializer.py:43
#: models_provider/serializers/model_serializer.py:222
#: models_provider/serializers/model_serializer.py:259
#: models_provider/serializers/model_serializer.py:323
msgid "base model"
msgstr "基础模型"
#: models_provider/serializers/model_serializer.py:44
#: models_provider/serializers/model_serializer.py:260
msgid "parameter configuration"
msgstr "参数配置"
#: models_provider/serializers/model_serializer.py:45
#: models_provider/serializers/model_serializer.py:225
#: models_provider/serializers/model_serializer.py:261
#| msgid "Get current user information"
msgid "certification information"
msgstr "认证信息"
#: models_provider/serializers/model_serializer.py:233
#: models_provider/serializers/model_serializer.py:272
#, python-brace-format
msgid "base model【{model_name}】already exists"
msgstr "模型【{model_name}】已存在"
#: models_provider/serializers/model_serializer.py:312
msgid "Model saving failed"
msgstr "模型保存失败"
#: models_provider/serializers/model_serializer.py:325
msgid "create user"
msgstr "创建者"
#: models_provider/views/model.py:28 models_provider/views/model.py:29
#: models_provider/views/model.py:30
msgid "Create model"
msgstr "创建模型"
#: models_provider/views/model.py:31 models_provider/views/model.py:59
#: models_provider/views/model.py:77 models_provider/views/model.py:90
#: models_provider/views/model.py:102 models_provider/views/model.py:117
#: models_provider/views/model.py:130 models_provider/views/model.py:148
#: models_provider/views/model.py:164 models_provider/views/provide.py:25
#: models_provider/views/provide.py:49 models_provider/views/provide.py:64
#: models_provider/views/provide.py:83 models_provider/views/provide.py:101
msgid "Model"
msgstr "模型"
#: models_provider/views/model.py:54 models_provider/views/model.py:55
#: models_provider/views/model.py:56
msgid "Query model list"
msgstr "查询模型列表"
#: models_provider/views/model.py:71 models_provider/views/model.py:72
#: models_provider/views/model.py:73
msgid "Update model"
msgstr "更新模型"
#: models_provider/views/model.py:85 models_provider/views/model.py:86
#: models_provider/views/model.py:87
msgid "Delete model"
msgstr "删除模型"
#: models_provider/views/model.py:97 models_provider/views/model.py:98
#: models_provider/views/model.py:99
msgid "Query model details"
msgstr "查询模型详情"
#: models_provider/views/model.py:112 models_provider/views/model.py:113
#: models_provider/views/model.py:114
msgid "Get model parameter form"
msgstr "获取模型参数表单"
#: models_provider/views/model.py:124 models_provider/views/model.py:125
#: models_provider/views/model.py:126
msgid "Save model parameter form"
msgstr "保存模型参数表单"
#: models_provider/views/model.py:141 models_provider/views/model.py:143
#: models_provider/views/model.py:145
msgid ""
"Query model meta information, this interface does not carry authentication "
"information"
msgstr "查询模型元信息,该接口不携带认证信息"
#: models_provider/views/model.py:158 models_provider/views/model.py:159
#: models_provider/views/model.py:160
msgid "Pause model download"
msgstr "下载模型暂停"
#: models_provider/views/provide.py:21 models_provider/views/provide.py:22
#: models_provider/views/provide.py:23
msgid "Get a list of model suppliers"
msgstr "获取模型供应商列表"
#: models_provider/views/provide.py:44 models_provider/views/provide.py:45
#: models_provider/views/provide.py:46
msgid "Get a list of model types"
msgstr "获取模型类型列表"
#: models_provider/views/provide.py:59 models_provider/views/provide.py:60
#: models_provider/views/provide.py:61
msgid "Example of obtaining model list"
msgstr "获取模型列表示例"
#: models_provider/views/provide.py:78 models_provider/views/provide.py:79
#: models_provider/views/provide.py:80
msgid "Get model default parameters"
msgstr "获取模型默认参数"
#: models_provider/views/provide.py:96 models_provider/views/provide.py:97
#: models_provider/views/provide.py:98
msgid "Get the model creation form"
msgstr "获取模型创建表单"
#: tools/serializers/tool.py:91 tools/serializers/tool.py:153
msgid "variable name"
msgstr "变量名称"
#: tools/serializers/tool.py:93
msgid "type"
msgstr "类型"
#: tools/serializers/tool.py:95
msgid "fields only support string|int|dict|array|float"
msgstr "字段仅支持字符串|整数|字典|数组|浮点数"
#: tools/serializers/tool.py:99
msgid "The field only supports custom|reference"
msgstr "字段仅支持自定义|引用"
#: tools/serializers/tool.py:104
#| msgid "model name"
msgid "field name"
msgstr "字段名称"
#: tools/serializers/tool.py:105
#| msgid "label"
msgid "field label"
msgstr "标签"
#: tools/serializers/tool.py:115 tools/serializers/tool.py:133
#: tools/serializers/tool.py:340
msgid "tool name"
msgstr "工具名称"
#: tools/serializers/tool.py:118 tools/serializers/tool.py:136
msgid "tool description"
msgstr "工具描述"
#: tools/serializers/tool.py:120 tools/serializers/tool.py:138
#: tools/serializers/tool.py:158
msgid "tool content"
msgstr "工具内容"
#: tools/serializers/tool.py:123 tools/serializers/tool.py:141
#: tools/serializers/tool.py:160
msgid "input field list"
msgstr "输入字段列表"
#: tools/serializers/tool.py:125 tools/serializers/tool.py:143
#: tools/serializers/tool.py:161
msgid "init field list"
msgstr "内置字段列表"
#: tools/serializers/tool.py:145 tools/serializers/tool.py:162
msgid "init params"
msgstr "内置参数"
#: tools/serializers/tool.py:154
#| msgid "variable name"
msgid "variable value"
msgstr "变量名称"
#: tools/serializers/tool.py:218
msgid "field has no value set"
msgstr "字段未设置值"
#: tools/serializers/tool.py:234 tools/serializers/tool.py:239
msgid "type error"
msgstr "类型错误"
#: tools/serializers/tool.py:242
#, python-brace-format
msgid "Field: {name} Type: {_type} Value: {value} Type conversion error"
msgstr "字段:{name} 类型:{_type} 值:{value} 类型转换错误"
#: tools/serializers/tool.py:247
#| msgid "model id"
msgid "tool id"
msgstr "工具 ID"
#: tools/serializers/tool.py:255
msgid "Tool not found"
msgstr "工具不存在"
#: tools/serializers/tool.py:290
msgid "file"
msgstr "文件"
#: tools/serializers/tool.py:291 users/api/user.py:39 users/api/user.py:51
#: users/api/user.py:67 users/serializers/user.py:262
msgid "User ID"
msgstr "用户 ID"
#: tools/serializers/tool.py:304
msgid "Unsupported file format"
msgstr "不支持的文件格式"
#: tools/serializers/tool.py:330 tools/serializers/tool.py:349
#| msgid "Module not found"
msgid "Folder not found"
msgstr "文件夹不存在"
#: tools/serializers/tool.py:341
#| msgid "model type"
msgid "tool type"
msgstr "工具类型"
#: tools/views/tool.py:21 tools/views/tool.py:22
msgid "Create tool"
msgstr "创建工具"
#: tools/views/tool.py:26 tools/views/tool.py:40 tools/views/tool.py:57
#: tools/views/tool.py:75 tools/views/tool.py:89 tools/views/tool.py:103
#: tools/views/tool.py:120 tools/views/tool.py:144 tools/views/tool.py:161
msgid "Tool"
msgstr "工具"
#: tools/views/tool.py:36 tools/views/tool.py:37
#| msgid "Get module"
msgid "Get tool by folder"
msgstr "通过文件夹获取工具"
#: tools/views/tool.py:53 tools/views/tool.py:54
msgid "Debug Tool"
msgstr "调试工具"
#: tools/views/tool.py:70 tools/views/tool.py:71
#| msgid "Update model"
msgid "Update tool"
msgstr "更新工具"
#: tools/views/tool.py:85 tools/views/tool.py:86
#| msgid "Create tool"
msgid "Get tool"
msgstr "获取工具"
#: tools/views/tool.py:99 tools/views/tool.py:100
#| msgid "Delete model"
msgid "Delete tool"
msgstr "删除工具"
#: tools/views/tool.py:116 tools/views/tool.py:117
msgid "Get tool list by pagination"
msgstr "获取工具列表"
#: tools/views/tool.py:139 tools/views/tool.py:140
#| msgid "Create tool"
msgid "Import tool"
msgstr "导入工具"
#: tools/views/tool.py:157 tools/views/tool.py:158
#| msgid "Create tool"
msgid "Export tool"
msgstr "导出工具"
#: users/api/user.py:90
#| msgid "Username"
msgid "Email or Username"
msgstr "邮箱或用户名"
#: users/api/user.py:106
#| msgid "workspace id"
msgid "Workspace ID"
msgstr "工作空间 ID"
#: users/serializers/login.py:27 users/serializers/user.py:40
#: users/serializers/user.py:87
msgid "Username"
msgstr "用户名"
#: users/serializers/login.py:28 users/serializers/user.py:41
#: users/serializers/user.py:99 users/serializers/user.py:228
msgid "Password"
msgstr "密码"
#: users/serializers/login.py:29 users/serializers/login.py:69
msgid "captcha"
msgstr "验证码"
#: users/serializers/login.py:36
msgid "token"
msgstr "令牌"
#: users/serializers/login.py:50
msgid "Captcha code error or expiration"
msgstr "验证码错误或过期"
#: users/serializers/login.py:55
msgid "The user has been disabled, please contact the administrator!"
msgstr "用户已被禁用,请联系管理员!"
#: users/serializers/user.py:31
#| msgid "Password"
msgid "Is Edit Password"
msgstr "是否编辑密码"
#: users/serializers/user.py:32
#| msgid "No permission to access"
msgid "permissions"
msgstr "无权限访问"
#: users/serializers/user.py:42 users/serializers/user.py:79
#: users/serializers/user.py:191
msgid "Email"
msgstr "邮箱"
#: users/serializers/user.py:43 users/serializers/user.py:113
#| msgid "model name"
msgid "Nick name"
msgstr "昵称"
#: users/serializers/user.py:44 users/serializers/user.py:120
#: users/serializers/user.py:206
msgid "Phone"
msgstr "手机"
#: users/serializers/user.py:93
msgid "Username must be 6-20 characters long"
msgstr "用户名必须为6-20个字符"
#: users/serializers/user.py:106 users/serializers/user.py:235
msgid ""
"The password must be 6-20 characters long and must be a combination of "
"letters, numbers, and special characters."
msgstr "密码必须为6-20个字符且必须包含字母、数字和特殊字符。"
#: users/serializers/user.py:142
msgid "Email or username"
msgstr "邮箱或用户名"
#: users/serializers/user.py:168
msgid ""
"The community version supports up to 2 users. If you need more users, please "
"contact us (https://fit2cloud.com/)."
msgstr "社区版支持最多2个用户如需更多用户请联系我们https://fit2cloud.com/)。"
#: users/serializers/user.py:199
msgid "Name"
msgstr "用户名"
#: users/serializers/user.py:213
#, fuzzy
#| msgid "Is active"
msgid "Is Active"
msgstr "是否启用"
#: users/serializers/user.py:223
#| msgid "Model saving failed"
msgid "Email is already in use"
msgstr "邮箱已被使用"
#: users/serializers/user.py:242
#| msgid "Password"
msgid "Re Password"
msgstr "确认密码"
#: users/serializers/user.py:247
msgid ""
"The confirmation password must be 6-20 characters long and must be a "
"combination of letters, numbers, and special characters."
msgstr "确认密码必须为6-20个字符且必须包含字母、数字和特殊字符。"
#: users/serializers/user.py:270
#| msgid "Model does not exist"
msgid "User does not exist"
msgstr "用户不存在"
#: users/serializers/user.py:285
#| msgid "Super administrator"
msgid "Unable to delete administrator"
msgstr "无法删除管理员"
#: users/serializers/user.py:302
msgid "Cannot modify administrator status"
msgstr "不能修改管理员状态"
#: users/views/login.py:21 users/views/login.py:22 users/views/login.py:23
msgid "Log in"
msgstr "登录"
#: users/views/login.py:24 users/views/login.py:36 users/views/user.py:31
#: users/views/user.py:44 users/views/user.py:58 users/views/user.py:73
#: users/views/user.py:87 users/views/user.py:98 users/views/user.py:109
#: users/views/user.py:125 users/views/user.py:140
msgid "User management"
msgstr "用户管理"
#: users/views/login.py:33 users/views/login.py:34 users/views/login.py:35
msgid "Get captcha"
msgstr "获取验证码"
#: users/views/user.py:28 users/views/user.py:29 users/views/user.py:30
#: users/views/user.py:41 users/views/user.py:42
msgid "Get current user information"
msgstr "获取当前用户信息"
#: users/views/user.py:70 users/views/user.py:71 users/views/user.py:72
#| msgid "create user"
msgid "Create user"
msgstr "创建者"
#: users/views/user.py:84 users/views/user.py:85 users/views/user.py:86
#| msgid "Delete model"
msgid "Delete user"
msgstr "删除用户"
#: users/views/user.py:95 users/views/user.py:96 users/views/user.py:97
#| msgid "Get current user information"
msgid "Get user information"
msgstr "获取用户信息"
#: users/views/user.py:106 users/views/user.py:107 users/views/user.py:108
#| msgid "Get current user information"
msgid "Update user information"
msgstr "更新当前用户信息"
#: users/views/user.py:122 users/views/user.py:123 users/views/user.py:124
#| msgid "Password"
msgid "Change password"
msgstr "修改密码"
#: users/views/user.py:137 users/views/user.py:138 users/views/user.py:139
msgid "Get user paginated list"
msgstr "获取用户分页列表"