commit 061a41c4a1
parent 0cc1d00140
@@ -65,16 +65,21 @@ class IChatStep(IBaseChatPipelineStep):
     post_response_handler = InstanceField(model_type=PostResponseHandler,
                                           error_messages=ErrMessage.base(_("Post-processor")))
     # Completion question
-    padding_problem_text = serializers.CharField(required=False, error_messages=ErrMessage.base(_("Completion Question")))
+    padding_problem_text = serializers.CharField(required=False,
+                                                 error_messages=ErrMessage.base(_("Completion Question")))
     # Whether to stream the output
     stream = serializers.BooleanField(required=False, error_messages=ErrMessage.base(_("Streaming Output")))
     client_id = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Client id")))
     client_type = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Client Type")))
     # No reference segments were found
-    no_references_setting = NoReferencesSetting(required=True, error_messages=ErrMessage.base(_("No reference segment settings")))
+    no_references_setting = NoReferencesSetting(required=True,
+                                                error_messages=ErrMessage.base(_("No reference segment settings")))

     user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("User ID")))

+    model_setting = serializers.DictField(required=True, allow_null=True,
+                                          error_messages=ErrMessage.dict(_("Model settings")))
+
     model_params_setting = serializers.DictField(required=False, allow_null=True,
                                                  error_messages=ErrMessage.dict(_("Model parameter settings")))

@@ -101,5 +106,5 @@ class IChatStep(IBaseChatPipelineStep):
                 paragraph_list=None,
                 manage: PipelineManage = None,
                 padding_problem_text: str = None, stream: bool = True, client_id=None, client_type=None,
-                no_references_setting=None, model_params_setting=None, **kwargs):
+                no_references_setting=None, model_params_setting=None, model_setting=None, **kwargs):
        pass
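
The new required `model_setting` field carries the reasoning-display options through the chat pipeline alongside the existing model parameters. A hypothetical sketch of the dict it holds (the key names are the ones read with `.get()` in the hunks that follow):

    # Hypothetical example (ours, not from the commit): the dict carried by the
    # new model_setting field.
    model_setting = {
        'reasoning_content_enable': True,      # surface reasoning text to the client
        'reasoning_content_start': '<think>',  # tag that opens the reasoning block
        'reasoning_content_end': '</think>',   # tag that closes it
    }
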
@@ -24,6 +24,7 @@ from rest_framework import status
 from application.chat_pipeline.I_base_chat_pipeline import ParagraphPipelineModel
 from application.chat_pipeline.pipeline_manage import PipelineManage
 from application.chat_pipeline.step.chat_step.i_chat_step import IChatStep, PostResponseHandler
+from application.flow.tools import Reasoning
 from application.models.api_key_model import ApplicationPublicAccessClient
 from common.constants.authentication_type import AuthenticationType
 from setting.models_provider.tools import get_model_instance_by_model_user_id
@@ -63,17 +64,37 @@ def event_content(response,
                   problem_text: str,
                   padding_problem_text: str = None,
                   client_id=None, client_type=None,
-                  is_ai_chat: bool = None):
+                  is_ai_chat: bool = None,
+                  model_setting=None):
+    if model_setting is None:
+        model_setting = {}
+    reasoning_content_enable = model_setting.get('reasoning_content_enable', False)
+    reasoning_content_start = model_setting.get('reasoning_content_start', '<think>')
+    reasoning_content_end = model_setting.get('reasoning_content_end', '</think>')
+    reasoning = Reasoning(reasoning_content_start,
+                          reasoning_content_end)
     all_text = ''
+    reasoning_content = ''
     try:
         for chunk in response:
-            all_text += chunk.content
+            reasoning_chunk = reasoning.get_reasoning_content(chunk)
+            content_chunk = reasoning_chunk.get('content')
+            if 'reasoning_content' in chunk.additional_kwargs:
+                reasoning_content_chunk = chunk.additional_kwargs.get('reasoning_content', '')
+            else:
+                reasoning_content_chunk = reasoning_chunk.get('reasoning_content')
+            all_text += content_chunk
+            if reasoning_content_chunk is None:
+                reasoning_content_chunk = ''
+            reasoning_content += reasoning_content_chunk
             yield manage.get_base_to_response().to_stream_chunk_response(chat_id, str(chat_record_id), 'ai-chat-node',
-                                                                         [], chunk.content,
+                                                                         [], content_chunk,
                                                                          False,
                                                                          0, 0, {'node_is_end': False,
                                                                                 'view_type': 'many_view',
-                                                                                'node_type': 'ai-chat-node'})
+                                                                                'node_type': 'ai-chat-node',
+                                                                                'real_node_id': 'ai-chat-node',
+                                                                                'reasoning_content': reasoning_content_chunk if reasoning_content_enable else ''})
         # Get tokens
         if is_ai_chat:
             try:
@@ -87,7 +108,8 @@ def event_content(response,
             response_token = 0
         write_context(step, manage, request_token, response_token, all_text)
         post_response_handler.handler(chat_id, chat_record_id, paragraph_list, problem_text,
-                                      all_text, manage, step, padding_problem_text, client_id)
+                                      all_text, manage, step, padding_problem_text, client_id,
+                                      reasoning_content=reasoning_content if reasoning_content_enable else '')
         yield manage.get_base_to_response().to_stream_chunk_response(chat_id, str(chat_record_id), 'ai-chat-node',
                                                                      [], '', True,
                                                                      request_token, response_token,
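
A minimal sketch (ours, not project code) of the per-chunk split the hunks above perform: reasoning text reported directly by the model API via `chunk.additional_kwargs` takes priority over tag parsing, and `or ''` mirrors the explicit None check in the diff:

    def split_chunk(chunk, reasoning):
        # reasoning is a Reasoning instance (added in tools.py further down)
        parsed = reasoning.get_reasoning_content(chunk)
        if 'reasoning_content' in chunk.additional_kwargs:
            reasoning_piece = chunk.additional_kwargs.get('reasoning_content', '')
        else:
            reasoning_piece = parsed.get('reasoning_content') or ''
        return parsed.get('content'), reasoning_piece
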
@@ -122,17 +144,20 @@ class BaseChatStep(IChatStep):
                 client_id=None, client_type=None,
                 no_references_setting=None,
                 model_params_setting=None,
+                model_setting=None,
                 **kwargs):
         chat_model = get_model_instance_by_model_user_id(model_id, user_id,
                                                          **model_params_setting) if model_id is not None else None
         if stream:
             return self.execute_stream(message_list, chat_id, problem_text, post_response_handler, chat_model,
                                        paragraph_list,
-                                       manage, padding_problem_text, client_id, client_type, no_references_setting)
+                                       manage, padding_problem_text, client_id, client_type, no_references_setting,
+                                       model_setting)
         else:
             return self.execute_block(message_list, chat_id, problem_text, post_response_handler, chat_model,
                                       paragraph_list,
-                                      manage, padding_problem_text, client_id, client_type, no_references_setting)
+                                      manage, padding_problem_text, client_id, client_type, no_references_setting,
+                                      model_setting)

     def get_details(self, manage, **kwargs):
         return {
@@ -187,14 +212,15 @@ class BaseChatStep(IChatStep):
                        manage: PipelineManage = None,
                        padding_problem_text: str = None,
                        client_id=None, client_type=None,
-                       no_references_setting=None):
+                       no_references_setting=None,
+                       model_setting=None):
         chat_result, is_ai_chat = self.get_stream_result(message_list, chat_model, paragraph_list,
                                                          no_references_setting, problem_text)
         chat_record_id = uuid.uuid1()
         r = StreamingHttpResponse(
             streaming_content=event_content(chat_result, chat_id, chat_record_id, paragraph_list,
                                             post_response_handler, manage, self, chat_model, message_list, problem_text,
-                                            padding_problem_text, client_id, client_type, is_ai_chat),
+                                            padding_problem_text, client_id, client_type, is_ai_chat, model_setting),
             content_type='text/event-stream;charset=utf-8')

         r['Cache-Control'] = 'no-cache'
@@ -230,7 +256,13 @@ class BaseChatStep(IChatStep):
                       paragraph_list=None,
                       manage: PipelineManage = None,
                       padding_problem_text: str = None,
-                      client_id=None, client_type=None, no_references_setting=None):
+                      client_id=None, client_type=None, no_references_setting=None,
+                      model_setting=None):
+        reasoning_content_enable = model_setting.get('reasoning_content_enable', False)
+        reasoning_content_start = model_setting.get('reasoning_content_start', '<think>')
+        reasoning_content_end = model_setting.get('reasoning_content_end', '</think>')
+        reasoning = Reasoning(reasoning_content_start,
+                              reasoning_content_end)
         chat_record_id = uuid.uuid1()
         # Call the model
         try:
@@ -243,14 +275,23 @@ class BaseChatStep(IChatStep):
                 request_token = 0
                 response_token = 0
             write_context(self, manage, request_token, response_token, chat_result.content)
+            reasoning_result = reasoning.get_reasoning_content(chat_result)
+            content = reasoning_result.get('content')
+            if 'reasoning_content' in chat_result.response_metadata:
+                reasoning_content = chat_result.response_metadata.get('reasoning_content', '')
+            else:
+                reasoning_content = reasoning_result.get('reasoning_content')
             post_response_handler.handler(chat_id, chat_record_id, paragraph_list, problem_text,
-                                          chat_result.content, manage, self, padding_problem_text, client_id)
+                                          chat_result.content, manage, self, padding_problem_text, client_id,
+                                          reasoning_content=reasoning_content if reasoning_content_enable else '')
             add_access_num(client_id, client_type, manage.context.get('application_id'))
             return manage.get_base_to_response().to_block_response(str(chat_id), str(chat_record_id),
-                                                                   chat_result.content, True,
-                                                                   request_token, response_token)
+                                                                   content, True,
+                                                                   request_token, response_token,
+                                                                   {'reasoning_content': reasoning_content})
         except Exception as e:
-            all_text = '异常' + str(e)
+            all_text = 'Exception:' + str(e)
             write_context(self, manage, 0, 0, all_text)
             post_response_handler.handler(chat_id, chat_record_id, paragraph_list, problem_text,
                                           all_text, manage, self, padding_problem_text, client_id)
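
In block (non-streaming) mode the whole completion arrives at once, so a single Reasoning pass strips the think block; that is why the return value switches from `chat_result.content` to the parsed `content`. An illustrative trace (ours), assuming the default tags and the Reasoning class added in tools.py further down:

    raw = '<think>Recall the capital of France.</think>Paris.'
    # One get_reasoning_content() call over a chunk holding `raw` yields:
    #   {'content': 'Paris.', 'reasoning_content': 'Recall the capital of France.'}
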
@@ -9,16 +9,22 @@


 class Answer:
-    def __init__(self, content, view_type, runtime_node_id, chat_record_id, child_node):
+    def __init__(self, content, view_type, runtime_node_id, chat_record_id, child_node, real_node_id,
+                 reasoning_content):
         self.view_type = view_type
         self.content = content
+        self.reasoning_content = reasoning_content
         self.runtime_node_id = runtime_node_id
         self.chat_record_id = chat_record_id
         self.child_node = child_node
+        self.real_node_id = real_node_id

     def to_dict(self):
         return {'view_type': self.view_type, 'content': self.content, 'runtime_node_id': self.runtime_node_id,
-                'chat_record_id': self.chat_record_id, 'child_node': self.child_node}
+                'chat_record_id': self.chat_record_id,
+                'child_node': self.child_node,
+                'reasoning_content': self.reasoning_content,
+                'real_node_id': self.real_node_id}


 class NodeChunk:
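
Example (ours, not from the commit): constructing the extended Answer and the dict shape `to_dict()` now produces with the two new fields.

    answer = Answer(content='Paris.', view_type='many_view', runtime_node_id='node-1',
                    chat_record_id='rec-1', child_node={}, real_node_id='node-1',
                    reasoning_content='Recall the capital.')
    assert answer.to_dict() == {
        'view_type': 'many_view', 'content': 'Paris.', 'runtime_node_id': 'node-1',
        'chat_record_id': 'rec-1', 'child_node': {},
        'reasoning_content': 'Recall the capital.', 'real_node_id': 'node-1'}
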
@@ -62,7 +62,9 @@ class WorkFlowPostHandler:
         answer_tokens = sum([row.get('answer_tokens') for row in details.values() if
                              'answer_tokens' in row and row.get('answer_tokens') is not None])
         answer_text_list = workflow.get_answer_text_list()
-        answer_text = '\n\n'.join(answer['content'] for answer in answer_text_list)
+        answer_text = '\n\n'.join(
+            '\n\n'.join([a.get('content') for a in answer]) for answer in
+            answer_text_list)
         if workflow.chat_record is not None:
             chat_record = workflow.chat_record
             chat_record.answer_text = answer_text
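
Worked example (ours) of the nested join: `get_answer_text_list()` now returns groups of answers (see the workflow_manage hunk further down), so each inner group is joined first, then the groups themselves.

    answer_text_list = [
        [{'content': 'Step one.'}, {'content': 'Step two.'}],
        [{'content': 'Summary.'}],
    ]
    answer_text = '\n\n'.join(
        '\n\n'.join([a.get('content') for a in answer]) for answer in answer_text_list)
    assert answer_text == 'Step one.\n\nStep two.\n\nSummary.'
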
@@ -157,8 +159,10 @@ class INode:
     def get_answer_list(self) -> List[Answer] | None:
         if self.answer_text is None:
             return None
+        reasoning_content_enable = self.context.get('model_setting', {}).get('reasoning_content_enable', False)
         return [
-            Answer(self.answer_text, self.view_type, self.runtime_node_id, self.workflow_params['chat_record_id'], {})]
+            Answer(self.answer_text, self.view_type, self.runtime_node_id, self.workflow_params['chat_record_id'], {},
+                   self.runtime_node_id, self.context.get('reasoning_content', '') if reasoning_content_enable else '')]

     def __init__(self, node, workflow_params, workflow_manage, up_node_id_list=None,
                  get_node_params=lambda node: node.properties.get('node_data')):
@@ -28,8 +28,9 @@ class ChatNodeSerializer(serializers.Serializer):
                                                 error_messages=ErrMessage.boolean(_('Whether to return content')))

     model_params_setting = serializers.DictField(required=False,
-                                                 error_messages=ErrMessage.integer(_("Model parameter settings")))
+                                                 error_messages=ErrMessage.dict(_("Model parameter settings")))
+    model_setting = serializers.DictField(required=False,
+                                          error_messages=ErrMessage.dict(_('Model settings')))
     dialogue_type = serializers.CharField(required=False, allow_blank=True, allow_null=True,
                                           error_messages=ErrMessage.char(_("Context Type")))

@@ -47,5 +48,6 @@ class IChatNode(INode):
                 chat_record_id,
                 model_params_setting=None,
                 dialogue_type=None,
+                model_setting=None,
                 **kwargs) -> NodeResult:
        pass
@@ -14,14 +14,17 @@ from django.db.models import QuerySet
 from langchain.schema import HumanMessage, SystemMessage
 from langchain_core.messages import BaseMessage, AIMessage

+from application.flow.common import Answer
 from application.flow.i_step_node import NodeResult, INode
 from application.flow.step_node.ai_chat_step_node.i_chat_node import IChatNode
+from application.flow.tools import Reasoning
 from setting.models import Model
 from setting.models_provider import get_model_credential
 from setting.models_provider.tools import get_model_instance_by_model_user_id


-def _write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow, answer: str):
+def _write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow, answer: str,
+                   reasoning_content: str):
     chat_model = node_variable.get('chat_model')
     message_tokens = chat_model.get_num_tokens_from_messages(node_variable.get('message_list'))
     answer_tokens = chat_model.get_num_tokens(answer)
@@ -31,6 +34,7 @@ def _write_context(node_variable: Dict, workflow_variable: Dict, node: INode, wo
     node.context['history_message'] = node_variable['history_message']
     node.context['question'] = node_variable['question']
     node.context['run_time'] = time.time() - node.context['start_time']
+    node.context['reasoning_content'] = reasoning_content
     if workflow.is_result(node, NodeResult(node_variable, workflow_variable)):
         node.answer_text = answer

@@ -45,10 +49,27 @@ def write_context_stream(node_variable: Dict, workflow_variable: Dict, node: INo
     """
     response = node_variable.get('result')
     answer = ''
+    reasoning_content = ''
+    model_setting = node.context.get('model_setting',
+                                     {'reasoning_content_enable': False, 'reasoning_content_end': '</think>',
+                                      'reasoning_content_start': '<think>'})
+    reasoning = Reasoning(model_setting.get('reasoning_content_start', '<think>'),
+                          model_setting.get('reasoning_content_end', '</think>'))
     for chunk in response:
-        answer += chunk.content
-        yield chunk.content
-    _write_context(node_variable, workflow_variable, node, workflow, answer)
+        reasoning_chunk = reasoning.get_reasoning_content(chunk)
+        content_chunk = reasoning_chunk.get('content')
+        if 'reasoning_content' in chunk.additional_kwargs:
+            reasoning_content_chunk = chunk.additional_kwargs.get('reasoning_content', '')
+        else:
+            reasoning_content_chunk = reasoning_chunk.get('reasoning_content')
+        answer += content_chunk
+        if reasoning_content_chunk is None:
+            reasoning_content_chunk = ''
+        reasoning_content += reasoning_content_chunk
+        yield {'content': content_chunk,
+               'reasoning_content': reasoning_content_chunk if model_setting.get('reasoning_content_enable',
+                                                                                 False) else ''}
+    _write_context(node_variable, workflow_variable, node, workflow, answer, reasoning_content)


 def write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow):
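
Downstream callers of `write_context_stream` previously received bare strings and now receive dicts. A minimal consumer sketch (ours, assumed shapes, not project code):

    def consume(node_stream):
        # node_stream: the generator produced by write_context_stream above
        for piece in node_stream:
            text = piece['content']
            thinking = piece['reasoning_content']  # '' unless reasoning_content_enable is on
            yield text, thinking
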
@@ -60,8 +81,17 @@ def write_context(node_variable: Dict, workflow_variable: Dict, node: INode, wor
     @param workflow: Workflow manager
     """
     response = node_variable.get('result')
-    answer = response.content
-    _write_context(node_variable, workflow_variable, node, workflow, answer)
+    model_setting = node.context.get('model_setting',
+                                     {'reasoning_content_enable': False, 'reasoning_content_end': '</think>',
+                                      'reasoning_content_start': '<think>'})
+    reasoning = Reasoning(model_setting.get('reasoning_content_start'), model_setting.get('reasoning_content_end'))
+    reasoning_result = reasoning.get_reasoning_content(response)
+    content = reasoning_result.get('content')
+    if 'reasoning_content' in response.response_metadata:
+        reasoning_content = response.response_metadata.get('reasoning_content', '')
+    else:
+        reasoning_content = reasoning_result.get('reasoning_content')
+    _write_context(node_variable, workflow_variable, node, workflow, content, reasoning_content)


 def get_default_model_params_setting(model_id):
@@ -92,17 +122,23 @@ class BaseChatNode(IChatNode):
     def save_context(self, details, workflow_manage):
         self.context['answer'] = details.get('answer')
         self.context['question'] = details.get('question')
+        self.context['reasoning_content'] = details.get('reasoning_content')
         self.answer_text = details.get('answer')

     def execute(self, model_id, system, prompt, dialogue_number, history_chat_record, stream, chat_id, chat_record_id,
                 model_params_setting=None,
                 dialogue_type=None,
+                model_setting=None,
                 **kwargs) -> NodeResult:
         if dialogue_type is None:
             dialogue_type = 'WORKFLOW'

         if model_params_setting is None:
             model_params_setting = get_default_model_params_setting(model_id)
+        if model_setting is None:
+            model_setting = {'reasoning_content_enable': False, 'reasoning_content_end': '</think>',
+                             'reasoning_content_start': '<think>'}
+        self.context['model_setting'] = model_setting
         chat_model = get_model_instance_by_model_user_id(model_id, self.flow_params_serializer.data.get('user_id'),
                                                          **model_params_setting)
         history_message = self.get_history_message(history_chat_record, dialogue_number, dialogue_type,
@@ -164,6 +200,7 @@ class BaseChatNode(IChatNode):
                 'history_message') is not None else [])],
             'question': self.context.get('question'),
             'answer': self.context.get('answer'),
+            'reasoning_content': self.context.get('reasoning_content'),
             'type': self.node.type,
             'message_tokens': self.context.get('message_tokens'),
             'answer_tokens': self.context.get('answer_tokens'),
@@ -19,7 +19,8 @@ def _is_interrupt_exec(node, node_variable: Dict, workflow_variable: Dict):
     return node_variable.get('is_interrupt_exec', False)


-def _write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow, answer: str):
+def _write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow, answer: str,
+                   reasoning_content: str):
     result = node_variable.get('result')
     node.context['application_node_dict'] = node_variable.get('application_node_dict')
     node.context['node_dict'] = node_variable.get('node_dict', {})
@@ -28,6 +29,7 @@ def _write_context(node_variable: Dict, workflow_variable: Dict, node: INode, wo
     node.context['answer_tokens'] = result.get('usage', {}).get('completion_tokens', 0)
     node.context['answer'] = answer
     node.context['result'] = answer
+    node.context['reasoning_content'] = reasoning_content
     node.context['question'] = node_variable['question']
     node.context['run_time'] = time.time() - node.context['start_time']
     if workflow.is_result(node, NodeResult(node_variable, workflow_variable)):
@@ -44,6 +46,7 @@ def write_context_stream(node_variable: Dict, workflow_variable: Dict, node: INo
     """
     response = node_variable.get('result')
     answer = ''
+    reasoning_content = ''
     usage = {}
     node_child_node = {}
     application_node_dict = node.context.get('application_node_dict', {})
@@ -60,9 +63,11 @@ def write_context_stream(node_variable: Dict, workflow_variable: Dict, node: INo
             node_type = response_content.get('node_type')
             real_node_id = response_content.get('real_node_id')
             node_is_end = response_content.get('node_is_end', False)
+            _reasoning_content = response_content.get('reasoning_content', '')
             if node_type == 'form-node':
                 is_interrupt_exec = True
             answer += content
+            reasoning_content += _reasoning_content
             node_child_node = {'runtime_node_id': runtime_node_id, 'chat_record_id': chat_record_id,
                                'child_node': child_node}

@@ -75,13 +80,16 @@ def write_context_stream(node_variable: Dict, workflow_variable: Dict, node: INo
                                     'chat_record_id': chat_record_id,
                                     'child_node': child_node,
                                     'index': len(application_node_dict),
-                                    'view_type': view_type}
+                                    'view_type': view_type,
+                                    'reasoning_content': _reasoning_content}
             else:
                 application_node['content'] += content
+                application_node['reasoning_content'] += _reasoning_content

             yield {'content': content,
                    'node_type': node_type,
                    'runtime_node_id': runtime_node_id, 'chat_record_id': chat_record_id,
+                   'reasoning_content': _reasoning_content,
                    'child_node': child_node,
                    'real_node_id': real_node_id,
                    'node_is_end': node_is_end,
@@ -91,7 +99,7 @@ def write_context_stream(node_variable: Dict, workflow_variable: Dict, node: INo
     node_variable['is_interrupt_exec'] = is_interrupt_exec
     node_variable['child_node'] = node_child_node
     node_variable['application_node_dict'] = application_node_dict
-    _write_context(node_variable, workflow_variable, node, workflow, answer)
+    _write_context(node_variable, workflow_variable, node, workflow, answer, reasoning_content)


 def write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow):
@@ -106,7 +114,8 @@ def write_context(node_variable: Dict, workflow_variable: Dict, node: INode, wor
     node_variable['result'] = {'usage': {'completion_tokens': response.get('completion_tokens'),
                                          'prompt_tokens': response.get('prompt_tokens')}}
     answer = response.get('content', '') or "抱歉,没有查找到相关内容,请重新描述您的问题或提供更多信息。"
-    _write_context(node_variable, workflow_variable, node, workflow, answer)
+    reasoning_content = response.get('reasoning_content', '')
+    _write_context(node_variable, workflow_variable, node, workflow, answer, reasoning_content)


 def reset_application_node_dict(application_node_dict, runtime_node_id, node_data):
@@ -139,18 +148,22 @@ class BaseApplicationNode(IApplicationNode):
         if application_node_dict is None or len(application_node_dict) == 0:
             return [
                 Answer(self.answer_text, self.view_type, self.runtime_node_id, self.workflow_params['chat_record_id'],
-                       self.context.get('child_node'))]
+                       self.context.get('child_node'), self.runtime_node_id, '')]
         else:
             return [Answer(n.get('content'), n.get('view_type'), self.runtime_node_id,
                            self.workflow_params['chat_record_id'], {'runtime_node_id': n.get('runtime_node_id'),
                                                                     'chat_record_id': n.get('chat_record_id')
-                                                                    , 'child_node': n.get('child_node')}) for n in
+                                                                    , 'child_node': n.get('child_node')}, n.get('real_node_id'),
+                           n.get('reasoning_content', ''))
+                    for n in
                     sorted(application_node_dict.values(), key=lambda item: item.get('index'))]

     def save_context(self, details, workflow_manage):
         self.context['answer'] = details.get('answer')
+        self.context['result'] = details.get('answer')
         self.context['question'] = details.get('question')
         self.context['type'] = details.get('type')
+        self.context['reasoning_content'] = details.get('reasoning_content')
         self.answer_text = details.get('answer')

     def execute(self, application_id, message, chat_id, chat_record_id, stream, re_chat, client_id, client_type,
@@ -229,6 +242,7 @@ class BaseApplicationNode(IApplicationNode):
             'run_time': self.context.get('run_time'),
             'question': self.context.get('question'),
             'answer': self.context.get('answer'),
+            'reasoning_content': self.context.get('reasoning_content'),
             'type': self.node.type,
             'message_tokens': self.context.get('message_tokens'),
             'answer_tokens': self.context.get('answer_tokens'),
@@ -75,7 +75,8 @@ class BaseFormNode(IFormNode):
         form_content_format = self.workflow_manage.reset_prompt(form_content_format)
         prompt_template = PromptTemplate.from_template(form_content_format, template_format='jinja2')
         value = prompt_template.format(form=form, context=context)
-        return [Answer(value, self.view_type, self.runtime_node_id, self.workflow_params['chat_record_id'], None)]
+        return [Answer(value, self.view_type, self.runtime_node_id, self.workflow_params['chat_record_id'], None,
+                       self.runtime_node_id, '')]

     def get_details(self, index: int, **kwargs):
         form_content_format = self.context.get('form_content_format')
@@ -16,6 +16,70 @@ from application.flow.i_step_node import WorkFlowPostHandler
 from common.response import result


+class Reasoning:
+    def __init__(self, reasoning_content_start, reasoning_content_end):
+        self.content = ""
+        self.reasoning_content = ""
+        self.all_content = ""
+        self.reasoning_content_start_tag = reasoning_content_start
+        self.reasoning_content_end_tag = reasoning_content_end
+        self.reasoning_content_start_tag_len = len(reasoning_content_start)
+        self.reasoning_content_end_tag_len = len(reasoning_content_end)
+        self.reasoning_content_end_tag_prefix = reasoning_content_end[0]
+        self.reasoning_content_is_start = False
+        self.reasoning_content_is_end = False
+        self.reasoning_content_chunk = ""
+
+    def get_reasoning_content(self, chunk):
+        self.all_content += chunk.content
+        if not self.reasoning_content_is_start and len(self.all_content) >= self.reasoning_content_start_tag_len:
+            if self.all_content.startswith(self.reasoning_content_start_tag):
+                self.reasoning_content_is_start = True
+                self.reasoning_content_chunk = self.all_content[self.reasoning_content_start_tag_len:]
+            else:
+                self.reasoning_content_is_end = True
+        else:
+            if self.reasoning_content_is_start:
+                self.reasoning_content_chunk += chunk.content
+        reasoning_content_end_tag_prefix_index = self.reasoning_content_chunk.find(
+            self.reasoning_content_end_tag_prefix)
+        if self.reasoning_content_is_end:
+            self.content += chunk.content
+            return {'content': chunk.content, 'reasoning_content': ''}
+        # Does the buffer contain the end tag?
+        if reasoning_content_end_tag_prefix_index > -1:
+            if len(
+                    self.reasoning_content_chunk) - reasoning_content_end_tag_prefix_index > self.reasoning_content_end_tag_len:
+                reasoning_content_end_tag_index = self.reasoning_content_chunk.find(self.reasoning_content_end_tag)
+                if reasoning_content_end_tag_index > -1:
+                    reasoning_content_chunk = self.reasoning_content_chunk[0:reasoning_content_end_tag_index]
+                    content_chunk = self.reasoning_content_chunk[
+                                    reasoning_content_end_tag_index + self.reasoning_content_end_tag_len:]
+                    self.reasoning_content += reasoning_content_chunk
+                    self.content += content_chunk
+                    self.reasoning_content_chunk = ""
+                    self.reasoning_content_is_end = True
+                    return {'content': content_chunk, 'reasoning_content': reasoning_content_chunk}
+                else:
+                    reasoning_content_chunk = self.reasoning_content_chunk[0:reasoning_content_end_tag_prefix_index + 1]
+                    self.reasoning_content_chunk = self.reasoning_content_chunk.replace(reasoning_content_chunk, '')
+                    self.reasoning_content += reasoning_content_chunk
+                    return {'content': '', 'reasoning_content': reasoning_content_chunk}
+            else:
+                return {'content': '', 'reasoning_content': ''}
+        else:
+            if self.reasoning_content_is_end:
+                self.content += chunk.content
+                return {'content': chunk.content, 'reasoning_content': ''}
+            else:
+                # No end-tag prefix in the buffer; emit it all as reasoning content
+                result = {'content': '', 'reasoning_content': self.reasoning_content_chunk}
+                self.reasoning_content += self.reasoning_content_chunk
+                self.reasoning_content_chunk = ""
+                return result
+
+
 def event_content(chat_id, chat_record_id, response, workflow,
                   write_context,
                   post_handler: WorkFlowPostHandler):
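
Usage sketch (ours) for the Reasoning parser above. It is stateful and is fed one streamed chunk at a time; `SimpleChunk` is a stand-in for a LangChain message chunk, of which only `.content` is read here:

    class SimpleChunk:
        def __init__(self, content):
            self.content = content

    reasoning = Reasoning('<think>', '</think>')
    for piece in ['<think>Recall the ', 'capital.</think>', 'Paris.']:
        print(reasoning.get_reasoning_content(SimpleChunk(piece)))
    # -> {'content': '', 'reasoning_content': 'Recall the '}
    # -> {'content': '', 'reasoning_content': ''}   (end tag still ambiguous, so buffered)
    # -> {'content': 'Paris.', 'reasoning_content': 'capital.'}
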
@@ -470,6 +470,7 @@ class WorkflowManage:
             if result is not None:
                 if self.is_result(current_node, current_result):
                     for r in result:
+                        reasoning_content = ''
                         content = r
                         child_node = {}
                         node_is_end = False
@@ -479,9 +480,12 @@ class WorkflowManage:
                             child_node = {'runtime_node_id': r.get('runtime_node_id'),
                                           'chat_record_id': r.get('chat_record_id')
                                 , 'child_node': r.get('child_node')}
-                            real_node_id = r.get('real_node_id')
-                            node_is_end = r.get('node_is_end')
+                            if r.__contains__('real_node_id'):
+                                real_node_id = r.get('real_node_id')
+                            if r.__contains__('node_is_end'):
+                                node_is_end = r.get('node_is_end')
                             view_type = r.get('view_type')
+                            reasoning_content = r.get('reasoning_content')
                         chunk = self.base_to_response.to_stream_chunk_response(self.params['chat_id'],
                                                                                self.params['chat_record_id'],
                                                                                current_node.id,
@@ -492,7 +496,8 @@ class WorkflowManage:
                                                                                'view_type': view_type,
                                                                                'child_node': child_node,
                                                                                'node_is_end': node_is_end,
-                                                                               'real_node_id': real_node_id})
+                                                                               'real_node_id': real_node_id,
+                                                                               'reasoning_content': reasoning_content})
                         current_node.node_chunk.add_chunk(chunk)
                     chunk = (self.base_to_response
                              .to_stream_chunk_response(self.params['chat_id'],
@@ -504,7 +509,8 @@ class WorkflowManage:
                                                         'node_type': current_node.type,
                                                         'view_type': view_type,
                                                         'child_node': child_node,
-                                                        'real_node_id': real_node_id}))
+                                                        'real_node_id': real_node_id,
+                                                        'reasoning_content': ''}))
                     current_node.node_chunk.add_chunk(chunk)
                 else:
                     list(result)
@@ -516,7 +522,7 @@ class WorkflowManage:
                                                               self.params['chat_record_id'],
                                                               current_node.id,
                                                               current_node.up_node_id_list,
-                                                              str(e), False, 0, 0,
+                                                              'Exception:' + str(e), False, 0, 0,
                                                               {'node_is_end': True,
                                                                'runtime_node_id': current_node.runtime_node_id,
                                                                'node_type': current_node.type,
@@ -603,20 +609,19 @@ class WorkflowManage:
             if len(current_answer.content) > 0:
                 if up_node is None or current_answer.view_type == 'single_view' or (
                         current_answer.view_type == 'many_view' and up_node.view_type == 'single_view'):
-                    result.append(current_answer)
+                    result.append([current_answer])
                 else:
                     if len(result) > 0:
                         exec_index = len(result) - 1
-                        content = result[exec_index].content
-                        result[exec_index].content += current_answer.content if len(
-                            content) == 0 else ('\n\n' + current_answer.content)
+                        if isinstance(result[exec_index], list):
+                            result[exec_index].append(current_answer)
                     else:
-                        result.insert(0, current_answer)
+                        result.insert(0, [current_answer])
             up_node = current_answer
         if len(result) == 0:
             # If there is no response, respond with empty data
-            return [Answer('', '', '', '', {}).to_dict()]
-        return [r.to_dict() for r in result]
+            return [[]]
+        return [[item.to_dict() for item in r] for r in result]

     def get_next_node(self):
         """
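
The return shape of `get_answer_text_list` changes from a flat list of answer dicts to groups of answer dicts: answers that share a view are collected into one inner list instead of being glued together with '\n\n'. An illustrative value (ours):

    answer_text_list = [
        [{'content': 'From node A', 'view_type': 'single_view'}],
        [{'content': 'From node B', 'view_type': 'many_view'},
         {'content': 'From node C', 'view_type': 'many_view'}],
    ]
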
@@ -35,7 +35,13 @@ def get_dataset_setting_dict():


 def get_model_setting_dict():
-    return {'prompt': Application.get_default_model_prompt(), 'no_references_prompt': '{question}'}
+    return {
+        'prompt': Application.get_default_model_prompt(),
+        'no_references_prompt': '{question}',
+        'reasoning_content_start': '<think>',
+        'reasoning_content_end': '</think>',
+        'reasoning_content_enable': False,
+    }


 class Application(AppModelMixin):
@@ -140,6 +140,13 @@ class ModelSettingSerializer(serializers.Serializer):
                                error_messages=ErrMessage.char(_("Role prompts")))
     no_references_prompt = serializers.CharField(required=True, max_length=102400, allow_null=True, allow_blank=True,
                                                  error_messages=ErrMessage.char(_("No citation segmentation prompt")))
+    reasoning_content_enable = serializers.BooleanField(required=False,
+                                                        error_messages=ErrMessage.char(_("Thinking process switch")))
+    reasoning_content_start = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=256,
+                                                    error_messages=ErrMessage.char(
+                                                        _("Thinking process start marker")))
+    reasoning_content_end = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=256,
+                                                  error_messages=ErrMessage.char(_("End of thinking process marker")))


 class ApplicationWorkflowSerializer(serializers.Serializer):
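
A hypothetical `model_setting` payload for the extended serializer (other required fields abbreviated); the three new keys are all optional:

    model_setting = {
        'prompt': '...',
        'no_references_prompt': '{question}',
        'reasoning_content_enable': True,
        'reasoning_content_start': '<think>',
        'reasoning_content_end': '</think>',
    }
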
@@ -22,6 +22,7 @@ from application.chat_pipeline.step.generate_human_message_step.impl.base_genera
     BaseGenerateHumanMessageStep
 from application.chat_pipeline.step.reset_problem_step.impl.base_reset_problem_step import BaseResetProblemStep
 from application.chat_pipeline.step.search_dataset_step.impl.base_search_dataset_step import BaseSearchDatasetStep
+from application.flow.common import Answer
 from application.flow.i_step_node import WorkFlowPostHandler
 from application.flow.workflow_manage import WorkflowManage, Flow
 from application.models import ChatRecord, Chat, Application, ApplicationDatasetMapping, ApplicationTypeChoices, \
@@ -104,6 +105,7 @@ class ChatInfo:
             'model_id': model_id,
             'problem_optimization': self.application.problem_optimization,
             'stream': True,
+            'model_setting': model_setting,
             'model_params_setting': model_params_setting if self.application.model_params_setting is None or len(
                 self.application.model_params_setting.keys()) == 0 else self.application.model_params_setting,
             'search_mode': self.application.dataset_setting.get(
@@ -157,6 +159,8 @@ def get_post_handler(chat_info: ChatInfo):
                     padding_problem_text: str = None,
                     client_id=None,
                     **kwargs):
+            answer_list = [[Answer(answer_text, 'ai-chat-node', 'ai-chat-node', 'ai-chat-node', {}, 'ai-chat-node',
+                                   kwargs.get('reasoning_content', '')).to_dict()]]
             chat_record = ChatRecord(id=chat_record_id,
                                      chat_id=chat_id,
                                      problem_text=problem_text,
@@ -164,7 +168,7 @@ def get_post_handler(chat_info: ChatInfo):
                                      details=manage.get_details(),
                                      message_tokens=manage.context['message_tokens'],
                                      answer_tokens=manage.context['answer_tokens'],
-                                     answer_text_list=[answer_text],
+                                     answer_text_list=answer_list,
                                      run_time=manage.context['run_time'],
                                      index=len(chat_info.chat_record_list) + 1)
             chat_info.append_chat_record(chat_record, client_id)
@@ -242,15 +246,18 @@ class ChatMessageSerializer(serializers.Serializer):
     runtime_node_id = serializers.CharField(required=False, allow_null=True, allow_blank=True,
                                             error_messages=ErrMessage.char(_("Runtime node id")))

-    node_data = serializers.DictField(required=False, allow_null=True, error_messages=ErrMessage.char(_("Node parameters")))
-    application_id = serializers.UUIDField(required=False, allow_null=True, error_messages=ErrMessage.uuid(_("Application ID")))
+    node_data = serializers.DictField(required=False, allow_null=True,
+                                      error_messages=ErrMessage.char(_("Node parameters")))
+    application_id = serializers.UUIDField(required=False, allow_null=True,
+                                           error_messages=ErrMessage.uuid(_("Application ID")))
     client_id = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Client id")))
     client_type = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Client Type")))
     form_data = serializers.DictField(required=False, error_messages=ErrMessage.char(_("Global variables")))
     image_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("picture")))
     document_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("document")))
     audio_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("Audio")))
-    child_node = serializers.DictField(required=False, allow_null=True, error_messages=ErrMessage.dict(_("Child Nodes")))
+    child_node = serializers.DictField(required=False, allow_null=True,
+                                       error_messages=ErrMessage.dict(_("Child Nodes")))

     def is_valid_application_workflow(self, *, raise_exception=False):
         self.is_valid_intraday_access_num()
@@ -1,12 +1,16 @@
 # coding=utf-8
+import warnings
+from typing import List, Dict, Optional, Any, Iterator, cast, Type, Union

-from typing import List, Dict, Optional, Any, Iterator, cast
+import openai
+from langchain_core.callbacks import CallbackManagerForLLMRun
 from langchain_core.language_models import LanguageModelInput
-from langchain_core.messages import BaseMessage, get_buffer_string
+from langchain_core.messages import BaseMessage, get_buffer_string, BaseMessageChunk, AIMessageChunk
 from langchain_core.outputs import ChatGenerationChunk, ChatGeneration
 from langchain_core.runnables import RunnableConfig, ensure_config
+from langchain_core.utils.pydantic import is_basemodel_subclass
 from langchain_openai import ChatOpenAI
+from langchain_openai.chat_models.base import _convert_chunk_to_generation_chunk

 from common.config.tokenizer_manage_config import TokenizerManage
@ -36,14 +40,101 @@ class BaseChatOpenAI(ChatOpenAI):
    return self.get_last_generation_info().get('output_tokens', 0)

def _stream(
        self, *args: Any, stream_usage: Optional[bool] = None, **kwargs: Any
) -> Iterator[ChatGenerationChunk]:
    kwargs["stream"] = True
    kwargs["stream_options"] = {"include_usage": True}
    for chunk in super()._stream(*args, stream_usage=stream_usage, **kwargs):
        if chunk.message.usage_metadata is not None:
            self.usage_metadata = chunk.message.usage_metadata
        yield chunk

def _stream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
    """Set default stream_options."""
    stream_usage = self._should_stream_usage(kwargs.get('stream_usage'), **kwargs)
    # Note: stream_options is not a valid parameter for Azure OpenAI.
    # To support users proxying Azure through ChatOpenAI, here we only specify
    # stream_options if include_usage is set to True.
    # See https://learn.microsoft.com/en-us/azure/ai-services/openai/whats-new
    # for release notes.
    if stream_usage:
        kwargs["stream_options"] = {"include_usage": stream_usage}

    kwargs["stream"] = True
    payload = self._get_request_payload(messages, stop=stop, **kwargs)
    default_chunk_class: Type[BaseMessageChunk] = AIMessageChunk
    base_generation_info = {}

    if "response_format" in payload and is_basemodel_subclass(
            payload["response_format"]
    ):
        # TODO: Add support for streaming with Pydantic response_format.
        warnings.warn("Streaming with Pydantic response_format not yet supported.")
        chat_result = self._generate(
            messages, stop, run_manager=run_manager, **kwargs
        )
        msg = chat_result.generations[0].message
        yield ChatGenerationChunk(
            message=AIMessageChunk(
                **msg.dict(exclude={"type", "additional_kwargs"}),
                # preserve the "parsed" Pydantic object without converting to dict
                additional_kwargs=msg.additional_kwargs,
            ),
            generation_info=chat_result.generations[0].generation_info,
        )
        return
    if self.include_response_headers:
        raw_response = self.client.with_raw_response.create(**payload)
        response = raw_response.parse()
        base_generation_info = {"headers": dict(raw_response.headers)}
    else:
        response = self.client.create(**payload)
    with response:
        is_first_chunk = True
        for chunk in response:
            if not isinstance(chunk, dict):
                chunk = chunk.model_dump()

            generation_chunk = _convert_chunk_to_generation_chunk(
                chunk,
                default_chunk_class,
                base_generation_info if is_first_chunk else {},
            )
            if generation_chunk is None:
                continue

            # custom code: remember the token usage reported on the final chunk
            if generation_chunk.message.usage_metadata is not None:
                self.usage_metadata = generation_chunk.message.usage_metadata
            # custom code: forward the model's reasoning delta with the chunk
            if 'reasoning_content' in chunk['choices'][0]['delta']:
                generation_chunk.message.additional_kwargs["reasoning_content"] = chunk['choices'][0]['delta'][
                    'reasoning_content']

            default_chunk_class = generation_chunk.message.__class__
            logprobs = (generation_chunk.generation_info or {}).get("logprobs")
            if run_manager:
                run_manager.on_llm_new_token(
                    generation_chunk.text, chunk=generation_chunk, logprobs=logprobs
                )
            is_first_chunk = False
            yield generation_chunk

def _create_chat_result(self,
                        response: Union[dict, openai.BaseModel],
                        generation_info: Optional[Dict] = None):
    result = super()._create_chat_result(response, generation_info)
    try:
        reasoning_content = ''
        reasoning_content_enable = False
        for res in response.choices:
            if 'reasoning_content' in res.message.model_extra:
                reasoning_content_enable = True
                _reasoning_content = res.message.model_extra.get('reasoning_content')
                if _reasoning_content is not None:
                    reasoning_content += _reasoning_content
        if reasoning_content_enable:
            result.llm_output['reasoning_content'] = reasoning_content
    except Exception as e:
        pass
    return result

def invoke(
    self,
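With this override in place, every streamed delta that exposes a `reasoning_content` field is copied onto the chunk's `additional_kwargs`, and `_create_chat_result` aggregates the same field for non-streamed calls. A minimal consumer-side sketch in TypeScript of folding such a stream, assuming the per-chunk fields arrive as plain `content`/`reasoning_content` strings (the `Delta` type and `foldDeltas` helper are illustrative, not part of this commit):

```typescript
// Illustrative only: fold a stream of deltas into separate answer and
// reasoning buffers, mirroring what the UI layer does with Chunk objects.
interface Delta {
  content?: string
  reasoning_content?: string
}

function foldDeltas(deltas: Delta[]): { answer: string; reasoning: string } {
  let answer = ''
  let reasoning = ''
  for (const delta of deltas) {
    // reasoning tokens and answer tokens share one stream, so they are
    // routed into separate accumulators as they arrive
    if (delta.reasoning_content) reasoning += delta.reasoning_content
    if (delta.content) answer += delta.content
  }
  return { answer, reasoning }
}
```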
@ -13,6 +13,7 @@ from langchain_openai.chat_models import ChatOpenAI

from common.config.tokenizer_manage_config import TokenizerManage
from setting.models_provider.base_model_provider import MaxKBBaseModel
from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI


def custom_get_token_ids(text: str):
@ -20,7 +21,7 @@ def custom_get_token_ids(text: str):
    return tokenizer.encode(text)


class OpenAIChatModel(MaxKBBaseModel, ChatOpenAI):
class OpenAIChatModel(MaxKBBaseModel, BaseChatOpenAI):

    @staticmethod
    def is_cache_model():
@ -29,6 +29,7 @@ interface Chunk {
  chat_id: string
  chat_record_id: string
  content: string
  reasoning_content: string
  node_id: string
  up_node_id: string
  is_end: boolean
@ -43,12 +44,16 @@ interface chatType {
  problem_text: string
  answer_text: string
  buffer: Array<String>
  answer_text_list: Array<{
    content: string
    chat_record_id?: string
    runtime_node_id?: string
    child_node?: any
  }>
  answer_text_list: Array<
    Array<{
      content: string
      reasoning_content: string
      chat_record_id?: string
      runtime_node_id?: string
      child_node?: any
      real_node_id?: string
    }>
  >
  /**
   * 是否写入结束
   */
@ -83,6 +88,7 @@ interface WriteNodeInfo {
  answer_text_list_index: number
  current_up_node?: any
  divider_content?: Array<string>
  divider_reasoning_content?: Array<string>
}
export class ChatRecordManage {
  id?: any
@ -105,20 +111,38 @@ export class ChatRecordManage {
  }
  append_answer(
    chunk_answer: string,
    reasoning_content: string,
    index?: number,
    chat_record_id?: string,
    runtime_node_id?: string,
    child_node?: any
    child_node?: any,
    real_node_id?: string
  ) {
    const set_index = index != undefined ? index : this.chat.answer_text_list.length - 1
    const content = this.chat.answer_text_list[set_index]
      ? this.chat.answer_text_list[set_index].content + chunk_answer
      : chunk_answer
    this.chat.answer_text_list[set_index] = {
      content: content,
      chat_record_id,
      runtime_node_id,
      child_node
    }
    if (chunk_answer || reasoning_content) {
      const set_index = index != undefined ? index : this.chat.answer_text_list.length - 1
      let card_list = this.chat.answer_text_list[set_index]
      if (!card_list) {
        card_list = []
        this.chat.answer_text_list[set_index] = card_list
      }
      const answer_value = card_list.find((item) => item.real_node_id == real_node_id)
      const content = answer_value ? answer_value.content + chunk_answer : chunk_answer
      const _reasoning_content = answer_value
        ? answer_value.reasoning_content + reasoning_content
        : reasoning_content
      if (answer_value) {
        answer_value.content = content
        answer_value.reasoning_content = _reasoning_content
      } else {
        card_list.push({
          content: content,
          reasoning_content: _reasoning_content,
          chat_record_id,
          runtime_node_id,
          child_node,
          real_node_id
        })
      }
    }

    this.chat.answer_text = this.chat.answer_text + chunk_answer
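The rewritten `append_answer` makes each slot of `answer_text_list` a list of answer cards and finds the card to extend by `real_node_id`, so chunks from different workflow nodes that share one answer slot no longer overwrite each other. A condensed sketch of that upsert (types simplified from the diff; `upsertCard` is an illustrative name):

```typescript
// Condensed view of the per-chunk upsert that append_answer performs.
interface AnswerCard {
  content: string
  reasoning_content: string
  real_node_id?: string
}

function upsertCard(
  card_list: AnswerCard[],
  chunk_answer: string,
  reasoning_content: string,
  real_node_id?: string
): void {
  // loose equality on purpose, matching the source
  const card = card_list.find((item) => item.real_node_id == real_node_id)
  if (card) {
    // an existing card for this node keeps growing in place
    card.content += chunk_answer
    card.reasoning_content += reasoning_content
  } else {
    // first chunk from this node opens a new card in the same answer slot
    card_list.push({ content: chunk_answer, reasoning_content, real_node_id })
  }
}
```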
@ -155,7 +179,7 @@ export class ChatRecordManage {
  ) {
    const none_index = this.findIndex(
      this.chat.answer_text_list,
      (item) => item.content == '',
      (item) => (item.length == 1 && item[0].content == '') || item.length == 0,
      'index'
    )
    if (none_index > -1) {
@ -166,7 +190,7 @@ export class ChatRecordManage {
    } else {
      const none_index = this.findIndex(
        this.chat.answer_text_list,
        (item) => item.content === '',
        (item) => (item.length == 1 && item[0].content == '') || item.length == 0,
        'index'
      )
      if (none_index > -1) {
@ -178,10 +202,10 @@ export class ChatRecordManage {

      this.write_node_info = {
        current_node: run_node,
        divider_content: ['\n\n'],
        current_up_node: current_up_node,
        answer_text_list_index: answer_text_list_index
      }

      return this.write_node_info
    }
    return undefined
@ -210,7 +234,7 @@ export class ChatRecordManage {
    }
    const last_index = this.findIndex(
      this.chat.answer_text_list,
      (item) => item.content == '',
      (item) => (item.length == 1 && item[0].content == '') || item.length == 0,
      'last'
    )
    if (last_index > 0) {
@ -234,7 +258,8 @@ export class ChatRecordManage {
      }
      return
    }
    const { current_node, answer_text_list_index, divider_content } = node_info
    const { current_node, answer_text_list_index } = node_info

    if (current_node.buffer.length > 20) {
      const context = current_node.is_end
        ? current_node.buffer.splice(0)
@ -242,12 +267,20 @@ export class ChatRecordManage {
            0,
            current_node.is_end ? undefined : current_node.buffer.length - 20
          )
      const reasoning_content = current_node.is_end
        ? current_node.reasoning_content_buffer.splice(0)
        : current_node.reasoning_content_buffer.splice(
            0,
            current_node.is_end ? undefined : current_node.reasoning_content_buffer.length - 20
          )
      this.append_answer(
        (divider_content ? divider_content.splice(0).join('') : '') + context.join(''),
        context.join(''),
        reasoning_content.join(''),
        answer_text_list_index,
        current_node.chat_record_id,
        current_node.runtime_node_id,
        current_node.child_node
        current_node.child_node,
        current_node.real_node_id
      )
    } else if (this.is_close) {
      while (true) {
@ -257,27 +290,46 @@ export class ChatRecordManage {
          break
        }
        this.append_answer(
          (node_info.divider_content ? node_info.divider_content.splice(0).join('') : '') +
            node_info.current_node.buffer.splice(0).join(''),
          node_info.current_node.buffer.splice(0).join(''),
          node_info.current_node.reasoning_content_buffer.splice(0).join(''),
          node_info.answer_text_list_index,
          node_info.current_node.chat_record_id,
          node_info.current_node.runtime_node_id,
          node_info.current_node.child_node
          node_info.current_node.child_node,
          node_info.current_node.real_node_id
        )
        if (node_info.current_node.buffer.length == 0) {
        if (
          node_info.current_node.buffer.length == 0 &&
          node_info.current_node.reasoning_content_buffer.length == 0
        ) {
          node_info.current_node.is_end = true
        }
      }
      this.closeInterval()
    } else {
      const s = current_node.buffer.shift()
      const reasoning_content = current_node.reasoning_content_buffer.shift()
      if (s !== undefined) {
        this.append_answer(
          (divider_content ? divider_content.splice(0).join('') : '') + s,
          s,
          '',
          answer_text_list_index,
          current_node.chat_record_id,
          current_node.runtime_node_id,
          current_node.child_node
          current_node.child_node,
          current_node.real_node_id
        )
      }
      if (reasoning_content !== undefined) {
        this.append_answer(
          '',
          reasoning_content,
          answer_text_list_index,
          current_node.chat_record_id,
          current_node.runtime_node_id,
          current_node.child_node,
          current_node.real_node_id
        )
      }
    }
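Both `buffer` and the new `reasoning_content_buffer` are drained under the same pacing rule: while the node is still streaming, everything except a trailing window of 20 entries is flushed in one `append_answer` call, and once the node ends the remainder is flushed wholesale. Reduced to a helper (illustrative; the real code inlines this, and the caller guards it with `buffer.length > 20`):

```typescript
// Reduced pacing rule shared by buffer and reasoning_content_buffer: hold
// roughly 20 entries back for the typewriter effect until the node ends.
function drain(buffer: string[], is_end: boolean): string[] {
  return is_end
    ? buffer.splice(0) // node finished: flush everything that is left
    : buffer.splice(0, buffer.length - 20) // still streaming: flush the backlog
}
```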
@ -303,9 +355,15 @@ export class ChatRecordManage {
    if (n) {
      n.buffer.push(...chunk.content)
      n.content += chunk.content
      if (chunk.reasoning_content) {
        n.reasoning_content_buffer.push(...chunk.reasoning_content)
        n.reasoning_content += chunk.reasoning_content
      }
    } else {
      n = {
        buffer: [...chunk.content],
        reasoning_content_buffer: chunk.reasoning_content ? [...chunk.reasoning_content] : [],
        reasoning_content: chunk.reasoning_content ? chunk.reasoning_content : '',
        content: chunk.content,
        real_node_id: chunk.real_node_id,
        node_id: chunk.node_id,
@ -324,13 +382,18 @@ export class ChatRecordManage {
        n['is_end'] = true
      }
    }
  append(answer_text_block: string) {
  append(answer_text_block: string, reasoning_content?: string) {
    let set_index = this.findIndex(
      this.chat.answer_text_list,
      (item) => item.content == '',
      (item) => item.length == 1 && item[0].content == '',
      'index'
    )
    this.chat.answer_text_list[set_index] = { content: answer_text_block }
    this.chat.answer_text_list[set_index] = [
      {
        content: answer_text_block,
        reasoning_content: reasoning_content ? reasoning_content : ''
      }
    ]
  }
}
@ -346,10 +409,10 @@ export class ChatManagement {
      chatRecord.appendChunk(chunk)
    }
  }
  static append(chatRecordId: string, content: string) {
  static append(chatRecordId: string, content: string, reasoning_content?: string) {
    const chatRecord = this.chatMessageContainer[chatRecordId]
    if (chatRecord) {
      chatRecord.append(content)
      chatRecord.append(content, reasoning_content)
    }
  }
  static updateStatus(chatRecordId: string, code: number) {
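Callers that push a complete answer in one shot can now hand the thought process along in the same call. A hypothetical call site (the record id and both strings are placeholder values, shown only to illustrate the new optional parameter):

```typescript
// Hypothetical usage of the widened static API; arguments are placeholders.
ChatManagement.append(
  'a1b2c3', // chatRecordId
  'The capital of France is Paris.', // answer content
  'The user asks for a capital city; France maps to Paris.' // optional reasoning
)
```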
@ -214,6 +214,17 @@
        {{ item.question || '-' }}
      </div>
    </div>
    <div
      class="card-never border-r-4 mt-8"
      v-if="item.type == WorkflowType.AiChat"
    >
      <h5 class="p-8-12">
        {{ $t('views.applicationWorkflow.nodes.aiChatNode.think') }}
      </h5>
      <div class="p-8-12 border-t-dashed lighter pre-wrap">
        {{ item.reasoning_content || '-' }}
      </div>
    </div>
    <div class="card-never border-r-4 mt-8">
      <h5 class="p-8-12">
        {{
@ -10,19 +10,23 @@
    <MdRenderer
      v-if="
        (chatRecord.write_ed === undefined || chatRecord.write_ed === true) &&
        !answer_text.content
        answer_text.length == 0
      "
      :source="$t('chat.tip.answerMessage')"
    ></MdRenderer>
    <MdRenderer
      :chat_record_id="answer_text.chat_record_id"
      :child_node="answer_text.child_node"
      :runtime_node_id="answer_text.runtime_node_id"
      :disabled="loading || type == 'log'"
      v-else-if="answer_text.content"
      :source="answer_text.content"
      :send-message="chatMessage"
    ></MdRenderer>
    <template v-else-if="answer_text.length > 0">
      <MdRenderer
        v-for="(answer, index) in answer_text"
        :key="index"
        :chat_record_id="answer.chat_record_id"
        :child_node="answer.child_node"
        :runtime_node_id="answer.runtime_node_id"
        :reasoning_content="answer.reasoning_content"
        :disabled="loading || type == 'log'"
        :source="answer.content"
        :send-message="chatMessage"
      ></MdRenderer>
    </template>
    <span v-else-if="chatRecord.is_stop" shadow="always" class="dialog-card">
      {{ $t('chat.tip.stopAnswer') }}
    </span>
@ -90,9 +94,20 @@ const openControl = (event: any) => {
const answer_text_list = computed(() => {
  return props.chatRecord.answer_text_list.map((item) => {
    if (typeof item == 'string') {
      return { content: item }
    }
    return item
    if (typeof item == 'string') {
      return [
        {
          content: item,
          chat_record_id: undefined,
          child_node: undefined,
          runtime_node_id: undefined,
          reasoning_content: undefined
        }
      ]
    } else if (item instanceof Array) {
      return item
    } else {
      return [item]
    }
  })
})
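This computed property is what keeps older chat records compatible with the new shape: every entry of `answer_text_list` is normalized to an array of cards before rendering. The same mapping, condensed (the `Card` alias is illustrative and drops the optional id fields):

```typescript
// Condensed form of the answer_text_list normalization: plain strings and
// bare card objects from older records are wrapped so every entry is a card
// array, which is what the template's v-for expects.
type Card = { content: string; reasoning_content?: string }

function normalize(item: string | Card | Card[]): Card[] {
  if (typeof item == 'string') return [{ content: item }]
  if (item instanceof Array) return item
  return [item]
}
```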
@ -7,7 +7,11 @@
  </div>
  <div class="content" v-if="prologue">
    <el-card shadow="always" class="dialog-card" style="--el-card-padding: 10px 16px 12px">
      <MdRenderer :source="prologue" :send-message="sendMessage"></MdRenderer>
      <MdRenderer
        :source="prologue"
        :send-message="sendMessage"
        reasoning_content=""
      ></MdRenderer>
    </el-card>
  </div>
</div>
@ -27,9 +31,7 @@ const toQuickQuestion = (match: string, offset: number, input: string) => {
  return `<quick_question>${match.replace('- ', '')}</quick_question>`
}
const prologue = computed(() => {
  const temp = props.available
    ? props.application?.prologue
    : t('chat.tip.prologueMessage')
  const temp = props.available ? props.application?.prologue : t('chat.tip.prologueMessage')
  return temp?.replace(/-\s.+/g, toQuickQuestion)
})
</script>
@ -284,8 +284,10 @@ function chatMessage(chat?: any, problem?: string, re_chat?: boolean, other_para
  id: randomId(),
  problem_text: problem ? problem : inputValue.value.trim(),
  answer_text: '',
  answer_text_list: [{ content: '' }],
  answer_text_list: [[]],
  buffer: [],
  reasoning_content: '',
  reasoning_content_buffer: [],
  write_ed: false,
  is_stop: false,
  record_id: '',
@ -1,40 +1,44 @@
<template>
  <template v-for="(item, index) in md_view_list" :key="index">
    <div
      v-if="item.type === 'question'"
      @click="sendMessage ? sendMessage(item.content, 'new') : (content: string) => {}"
      class="problem-button ellipsis-2 mt-4 mb-4"
      :class="sendMessage ? 'cursor' : 'disabled'"
    >
      <el-icon>
        <EditPen />
      </el-icon>
      {{ item.content }}
    </div>
    <HtmlRander v-else-if="item.type === 'html_rander'" :source="item.content"></HtmlRander>
    <EchartsRander
      v-else-if="item.type === 'echarts_rander'"
      :option="item.content"
    ></EchartsRander>
    <FormRander
      :chat_record_id="chat_record_id"
      :runtime_node_id="runtime_node_id"
      :child_node="child_node"
      :disabled="disabled"
      :send-message="sendMessage"
      v-else-if="item.type === 'form_rander'"
      :form_setting="item.content"
    ></FormRander>
    <MdPreview
      v-else
      noIconfont
      ref="editorRef"
      editorId="preview-only"
      :modelValue="item.content"
      :key="index"
      class="maxkb-md"
    />
  </template>
</template>
<template>
  <div>
    <!-- 推理过程组件 -->
    <ReasoningRander :content="reasoning_content" v-if="reasoning_content?.trim()" />
    <template v-for="(item, index) in md_view_list" :key="index">
      <div
        v-if="item.type === 'question'"
        @click="sendMessage ? sendMessage(item.content, 'new') : (content: string) => {}"
        class="problem-button ellipsis-2 mt-4 mb-4"
        :class="sendMessage ? 'cursor' : 'disabled'"
      >
        <el-icon>
          <EditPen />
        </el-icon>
        {{ item.content }}
      </div>
      <HtmlRander v-else-if="item.type === 'html_rander'" :source="item.content"></HtmlRander>
      <EchartsRander
        v-else-if="item.type === 'echarts_rander'"
        :option="item.content"
      ></EchartsRander>
      <FormRander
        :chat_record_id="chat_record_id"
        :runtime_node_id="runtime_node_id"
        :child_node="child_node"
        :disabled="disabled"
        :send-message="sendMessage"
        v-else-if="item.type === 'form_rander'"
        :form_setting="item.content"
      ></FormRander>
      <MdPreview
        v-else
        noIconfont
        ref="editorRef"
        editorId="preview-only"
        :modelValue="item.content"
        :key="index"
        class="maxkb-md"
      />
    </template>
  </div>
</template>
<script setup lang="ts">
import { computed, ref } from 'vue'
@ -42,6 +46,7 @@ import { config } from 'md-editor-v3'
import HtmlRander from './HtmlRander.vue'
import EchartsRander from './EchartsRander.vue'
import FormRander from './FormRander.vue'
import ReasoningRander from './ReasoningRander.vue'
config({
  markdownItConfig(md) {
    md.renderer.rules.image = (tokens, idx, options, env, self) => {
@ -65,6 +70,7 @@ config({
const props = withDefaults(
  defineProps<{
    source?: string
    reasoning_content?: string
    inner_suffix?: boolean
    sendMessage?: (question: string, type: 'old' | 'new', other_params_data?: any) => void
    child_node?: any
ui/src/components/markdown/ReasoningRander.vue (new file, 36 lines)
@ -0,0 +1,36 @@
<template>
  <div class="reasoning">
    <el-button text @click="showThink = !showThink" class="reasoning-button">
      {{ $t('views.applicationWorkflow.nodes.aiChatNode.think') }}
      <el-icon class="ml-4" :class="showThink ? 'rotate-180' : ''"><ArrowDownBold /></el-icon>
    </el-button>
    <el-collapse-transition>
      <div class="border-l mt-8" v-show="showThink">
        <MdPreview
          noIconfont
          ref="editorRef"
          editorId="preview-only"
          :modelValue="content"
          class="reasoning-md"
        />
      </div>
    </el-collapse-transition>
  </div>
</template>
<script lang="ts" setup>
import { ref } from 'vue'
const props = defineProps<{ content?: string }>()
const showThink = ref<boolean>(true)
</script>
<style lang="scss" scoped>
.reasoning {
  .reasoning-button {
    font-size: 14px;
    color: var(--app-text-color-secondary) !important;
  }
  .reasoning-md {
    padding-left: 8px;
    --md-color: var(--app-text-color-secondary) !important;
  }
}
</style>
@ -113,7 +113,8 @@ export default {
      tooltip: `If turned off, the content of this node will not be output to the user.
If you want the user to see the output of this node, please turn on the switch.`
    },
    defaultPrompt: 'Known Information'
    defaultPrompt: 'Known Information',
    think: 'Thinking Process'
  },
  searchDatasetNode: {
    label: 'Knowledge Retrieval',
@ -61,7 +61,8 @@ export default {
      references: ' (References Knowledge)',
      placeholder: 'Please enter prompt',
      requiredMessage: 'Please enter prompt',
      tooltip:'By adjusting the content of the prompt, you can guide the direction of the large model chat.',
      tooltip:
        'By adjusting the content of the prompt, you can guide the direction of the large model chat.',
      noReferencesTooltip:
        'By adjusting the content of the prompt, you can guide the direction of the large model chat. This prompt will be fixed at the beginning of the context. Variables used: {question} is the question posed by the user.',
@ -105,6 +106,13 @@ export default {
      browser: 'Browser playback (free)',
      tts: 'TTS Model',
      listeningTest: 'Preview'
    },
    reasoningContent: {
      label: 'Output Thinking',
      tooltip:
        'According to the thinking tags set by the model, the content between the tags will be considered as the thought process.',
      start: 'Start',
      end: 'End'
    }
  },
  buttons: {
@ -114,7 +114,8 @@ export default {
      tooltip: `关闭后该节点的内容则不输出给用户。
如果你想让用户看到该节点的输出内容,请打开开关。`
    },
    defaultPrompt: '已知信息'
    defaultPrompt: '已知信息',
    think: '思考过程'
  },
  searchDatasetNode: {
    label: '知识库检索',
@ -55,7 +55,8 @@ export default {
      references: ' (引用知识库)',
      placeholder: '请输入提示词',
      requiredMessage: '请输入提示词',
      tooltip:'通过调整提示词内容,可以引导大模型聊天方向,该提示词会被固定在上下文的开头,可以使用变量。',
      tooltip:
        '通过调整提示词内容,可以引导大模型聊天方向,该提示词会被固定在上下文的开头,可以使用变量。',
      noReferencesTooltip:
        '通过调整提示词内容,可以引导大模型聊天方向,该提示词会被固定在上下文的开头。可以使用变量:{question} 是用户提出问题的占位符。',
      referencesTooltip:
@ -96,6 +97,12 @@ export default {
      browser: '浏览器播放(免费)',
      tts: 'TTS模型',
      listeningTest: '试听'
    },
    reasoningContent: {
      label: '输出思考',
      tooltip: '请根据模型返回的思考标签设置,标签中间的内容将为认定为思考过程',
      start: '开始',
      end: '结束'
    }
  },
  buttons: {
@ -194,6 +201,5 @@ export default {
    text: '针对用户提问调试段落匹配情况,保障回答效果。',
    emptyMessage1: '命中段落显示在这里',
    emptyMessage2: '没有命中的分段'
  },

  }
}
@ -113,7 +113,8 @@ export default {
      tooltip: `關閉後該節點的內容則不輸出給用戶。
如果你想讓用戶看到該節點的輸出內容,請打開開關。`
    },
    defaultPrompt: '已知信息'
    defaultPrompt: '已知信息',
    think: '思考過程'
  },
  searchDatasetNode: {
    label: '知識庫檢索',
@ -97,6 +97,12 @@ export default {
      browser: '瀏覽器播放(免費)',
      tts: 'TTS模型',
      listeningTest: '試聽'
    },
    reasoningContent: {
      label: '輸出思考',
      tooltip: '請根據模型返回的思考標簽設置,標簽中間的內容將爲認定爲思考過程',
      start: '開始',
      end: '結束'
    }
  },
  buttons: {
@ -65,6 +65,7 @@
<template #label>
  <div class="flex-between">
    <span>{{ $t('views.application.applicationForm.form.aiModel.label') }}</span>

    <el-button
      type="primary"
      link
@ -276,6 +277,27 @@
    @submitDialog="submitPrologueDialog"
  />
</el-form-item>
<el-form-item @click.prevent>
  <template #label>
    <div class="flex-between">
      <span class="mr-4">
        {{ $t('views.application.applicationForm.form.reasoningContent.label') }}
      </span>

      <div class="flex">
        <el-button type="primary" link @click="openReasoningParamSettingDialog">
          <el-icon><Setting /></el-icon>
        </el-button>
        <el-switch
          class="ml-8"
          size="small"
          v-model="applicationForm.model_setting.reasoning_content_enable"
          @change="sttModelEnableChange"
        />
      </div>
    </div>
  </template>
</el-form-item>

<el-form-item
  prop="stt_model_id"
@ -453,6 +475,10 @@
/>

<EditAvatarDialog ref="EditAvatarDialogRef" @refresh="refreshIcon" />
<ReasoningParamSettingDialog
  ref="ReasoningParamSettingDialogRef"
  @refresh="submitReasoningDialog"
/>
</LayoutContainer>
</template>
<script setup lang="ts">
@ -472,6 +498,7 @@ import { MsgSuccess, MsgWarning } from '@/utils/message'
import useStore from '@/stores'
import { t } from '@/locales'
import TTSModeParamSettingDialog from './component/TTSModeParamSettingDialog.vue'
import ReasoningParamSettingDialog from './component/ReasoningParamSettingDialog.vue'

const { model, application } = useStore()

@ -493,6 +520,7 @@ const optimizationPrompt =
  t('views.application.applicationForm.dialog.defaultPrompt2')

const AIModeParamSettingDialogRef = ref<InstanceType<typeof AIModeParamSettingDialog>>()
const ReasoningParamSettingDialogRef = ref<InstanceType<typeof ReasoningParamSettingDialog>>()
const TTSModeParamSettingDialogRef = ref<InstanceType<typeof TTSModeParamSettingDialog>>()
const ParamSettingDialogRef = ref<InstanceType<typeof ParamSettingDialog>>()

@ -522,7 +550,8 @@ const applicationForm = ref<ApplicationFormType>({
  model_setting: {
    prompt: defaultPrompt,
    system: t('views.application.applicationForm.form.roleSettings.placeholder'),
    no_references_prompt: '{question}'
    no_references_prompt: '{question}',
    reasoning_content_enable: false
  },
  model_params_setting: {},
  problem_optimization: false,
@ -562,6 +591,12 @@ function submitNoReferencesPromptDialog(val: string) {
function submitSystemDialog(val: string) {
  applicationForm.value.model_setting.system = val
}
function submitReasoningDialog(val: any) {
  applicationForm.value.model_setting = {
    ...applicationForm.value.model_setting,
    ...val
  }
}

const submit = async (formEl: FormInstance | undefined) => {
  if (!formEl) return
@ -591,6 +626,10 @@ const openAIParamSettingDialog = () => {
  }
}

const openReasoningParamSettingDialog = () => {
  ReasoningParamSettingDialogRef.value?.open(applicationForm.value.model_setting)
}

const openTTSParamSettingDialog = () => {
  if (applicationForm.value.tts_model_id) {
    TTSModeParamSettingDialogRef.value?.open(
@ -0,0 +1,107 @@
<template>
  <el-dialog
    align-center
    :title="$t('common.setting')"
    class="param-dialog"
    v-model="dialogVisible"
    style="width: 550px"
    append-to-body
    :close-on-click-modal="false"
    :close-on-press-escape="false"
  >
    <el-form label-position="top" ref="paramFormRef" :model="form" class="p-12-16">
      <el-text type="info" class="color-secondary">{{
        $t('views.application.applicationForm.form.reasoningContent.tooltip')
      }}</el-text>
      <el-row class="mt-16" :gutter="20">
        <el-col :span="12">
          <el-form-item
            :label="$t('views.application.applicationForm.form.reasoningContent.start')"
          >
            <el-input
              v-model="form.reasoning_content_start"
              :rows="6"
              maxlength="50"
              placeholder="<think>"
            />
          </el-form-item>
        </el-col>
        <el-col :span="12">
          <el-form-item :label="$t('views.application.applicationForm.form.reasoningContent.end')">
            <el-input
              v-model="form.reasoning_content_end"
              :rows="6"
              maxlength="50"
              placeholder="</think>"
            />
          </el-form-item>
        </el-col>
      </el-row>
    </el-form>

    <template #footer>
      <span class="dialog-footer p-16">
        <el-button @click.prevent="dialogVisible = false">{{ $t('common.cancel') }}</el-button>
        <el-button type="primary" @click="submit()" :loading="loading">
          {{ $t('common.save') }}
        </el-button>
      </span>
    </template>
  </el-dialog>
</template>
<script setup lang="ts">
import { ref, watch, reactive } from 'vue'

const emit = defineEmits(['refresh'])

const form = ref<any>({
  reasoning_content_start: '<think>',
  reasoning_content_end: '</think>'
})

const dialogVisible = ref<boolean>(false)
const loading = ref(false)
watch(dialogVisible, (bool) => {
  if (!bool) {
    form.value = {
      reasoning_content_start: '<think>',
      reasoning_content_end: '</think>'
    }
  }
})

const open = (data: any) => {
  form.value = { ...form.value, ...data }
  dialogVisible.value = true
}

const submit = () => {
  emit('refresh', form.value)
  dialogVisible.value = false
}

defineExpose({ open })
</script>
<style lang="scss" scope>
.param-dialog {
  padding: 8px 8px 24px 8px;

  .el-dialog__header {
    padding: 16px 16px 0 16px;
  }

  .el-dialog__body {
    padding: 0 !important;
  }

  .dialog-max-height {
    height: 560px;
  }

  .custom-slider {
    .el-input-number.is-without-controls .el-input__wrapper {
      padding: 0 !important;
    }
  }
}
</style>
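The dialog itself only stores the two tag strings; the idea they configure is that, for models which inline their thought process in the answer text instead of sending a separate `reasoning_content` field, everything between the start and end tags is treated as reasoning and stripped from the answer. A hedged sketch of that splitting, assuming a complete (non-streamed) text and the `<think>`/`</think>` defaults; `splitReasoning` is illustrative, not the application's actual implementation:

```typescript
// Illustrative tag-based split: carve the reasoning span out of a finished
// answer, falling back to "no reasoning" when the tags are absent or malformed.
function splitReasoning(
  text: string,
  startTag = '<think>',
  endTag = '</think>'
): { reasoning: string; answer: string } {
  const start = text.indexOf(startTag)
  const end = text.indexOf(endTag)
  if (start === -1 || end === -1 || end < start) {
    return { reasoning: '', answer: text }
  }
  return {
    reasoning: text.slice(start + startTag.length, end),
    answer: text.slice(0, start) + text.slice(end + endTag.length)
  }
}
```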
@ -65,6 +65,10 @@ export const aiChatNode = {
    {
      label: t('views.applicationWorkflow.nodes.aiChatNode.answer'),
      value: 'answer'
    },
    {
      label: t('views.applicationWorkflow.nodes.aiChatNode.think'),
      value: 'reasoning_content'
    }
  ]
}
@ -7,7 +7,6 @@
  :model="chat_data"
  label-position="top"
  require-asterisk-position="right"
  class="mb-24"
  label-width="auto"
  ref="aiChatNodeFormRef"
  hide-required-asterisk
@ -29,6 +28,7 @@
    }}<span class="danger">*</span></span
  >
</div>

<el-button
  :disabled="!chat_data.model_id"
  type="primary"
@ -114,10 +114,8 @@
    :step-strictly="true"
  />
</el-form-item>
<el-form-item
  :label="$t('views.applicationWorkflow.nodes.aiChatNode.returnContent.label')"
  @click.prevent
>
<el-form-item @click.prevent>
  <template #label>
    <div class="flex align-center">
      <div class="mr-4">
@ -136,14 +134,38 @@
  </template>
  <el-switch size="small" v-model="chat_data.is_result" />
</el-form-item>
<el-form-item @click.prevent>
  <template #label>
    <div class="flex-between w-full">
      <div>
        <span>{{
          $t('views.application.applicationForm.form.reasoningContent.label')
        }}</span>
      </div>
      <el-button
        type="primary"
        link
        @click="openReasoningParamSettingDialog"
        @refreshForm="refreshParam"
      >
        <el-icon><Setting /></el-icon>
      </el-button>
    </div>
  </template>
  <el-switch size="small" v-model="chat_data.model_setting.reasoning_content_enable" />
</el-form-item>
</el-form>
</el-card>

<AIModeParamSettingDialog ref="AIModeParamSettingDialogRef" @refresh="refreshParam" />
<ReasoningParamSettingDialog
  ref="ReasoningParamSettingDialogRef"
  @refresh="submitReasoningDialog"
/>
</NodeContainer>
</template>
<script setup lang="ts">
import { set, groupBy } from 'lodash'
import { cloneDeep, set, groupBy } from 'lodash'
import { app } from '@/main'
import NodeContainer from '@/workflow/common/NodeContainer.vue'
import type { FormInstance } from 'element-plus'
@ -153,6 +175,7 @@ import useStore from '@/stores'
import { isLastNode } from '@/workflow/common/data'
import AIModeParamSettingDialog from '@/views/application/component/AIModeParamSettingDialog.vue'
import { t } from '@/locales'
import ReasoningParamSettingDialog from '@/views/application/component/ReasoningParamSettingDialog.vue'
const { model } = useStore()

const wheel = (e: any) => {
@ -198,16 +221,29 @@ const form = {
  is_result: false,
  temperature: null,
  max_tokens: null,
  dialogue_type: 'WORKFLOW'
  dialogue_type: 'WORKFLOW',
  model_setting: {
    reasoning_content_start: '<think>',
    reasoning_content_end: '</think>',
    reasoning_content_enable: false
  }
}

const chat_data = computed({
  get: () => {
    if (props.nodeModel.properties.node_data) {
      if (!props.nodeModel.properties.node_data.model_setting) {
        set(props.nodeModel.properties.node_data, 'model_setting', {
          reasoning_content_start: '<think>',
          reasoning_content_end: '</think>',
          reasoning_content_enable: false
        })
      }
      return props.nodeModel.properties.node_data
    } else {
      set(props.nodeModel.properties, 'node_data', form)
    }

    return props.nodeModel.properties.node_data
  },
  set: (value) => {
@ -220,6 +256,7 @@ const aiChatNodeFormRef = ref<FormInstance>()

const modelOptions = ref<any>(null)
const AIModeParamSettingDialogRef = ref<InstanceType<typeof AIModeParamSettingDialog>>()
const ReasoningParamSettingDialogRef = ref<InstanceType<typeof ReasoningParamSettingDialog>>()
const validate = () => {
  return aiChatNodeFormRef.value?.validate().catch((err) => {
    return Promise.reject({ node: props.nodeModel, errMessage: err })
@ -244,10 +281,24 @@ const openAIParamSettingDialog = (modelId: string) => {
  }
}

const openReasoningParamSettingDialog = () => {
  ReasoningParamSettingDialogRef.value?.open(chat_data.value.model_setting)
}

function refreshParam(data: any) {
  set(props.nodeModel.properties.node_data, 'model_params_setting', data)
}

function submitReasoningDialog(val: any) {
  let model_setting = cloneDeep(props.nodeModel.properties.node_data.model_setting)
  model_setting = {
    ...model_setting,
    ...val
  }

  set(props.nodeModel.properties.node_data, 'model_setting', model_setting)
}

onMounted(() => {
  getModel()
  if (typeof props.nodeModel.properties.node_data?.is_result === 'undefined') {