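"""Central runtime configuration.

Most values can be overridden via an environment variable of the same name;
the literals below are development defaults. Derived values (summarization and
tool-output budgets) are computed from those settings.
"""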
import os

# LLM Token Settings
MAX_CONTEXT_TOKENS = int(os.getenv("MAX_CONTEXT_TOKENS", 262144))
MAX_OUTPUT_TOKENS = int(os.getenv("MAX_OUTPUT_TOKENS", 8000))
SUMMARIZATION_MAX_TOKENS = MAX_CONTEXT_TOKENS - MAX_OUTPUT_TOKENS - 1000
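# With the defaults above this leaves 262144 - 8000 - 1000 = 253144 input tokens;
# the final 1000 tokens are presumably a safety margin for prompt framing added
# at request time.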

# Agent Cache Settings
AGENT_CACHE_MAX_SIZE = int(os.getenv("AGENT_CACHE_MAX_SIZE", 20))
AGENT_CACHE_TTL = int(os.getenv("AGENT_CACHE_TTL", 180))
AGENT_CACHE_AUTO_RENEW = os.getenv("AGENT_CACHE_AUTO_RENEW", "true").lower() == "true"
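# Assumed semantics: at most AGENT_CACHE_MAX_SIZE agents are kept in memory,
# each entry expires after AGENT_CACHE_TTL (presumably seconds), and
# AGENT_CACHE_AUTO_RENEW presumably resets the TTL whenever an entry is reused.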

# API Settings
BACKEND_HOST = os.getenv("BACKEND_HOST", "https://api-dev.gptbase.ai")
MASTERKEY = os.getenv("MASTERKEY", "master")
FASTAPI_URL = os.getenv("FASTAPI_URL", "http://127.0.0.1:8001")
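# The "master" default for MASTERKEY is presumably a development placeholder
# and is expected to be overridden via the environment in real deployments.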

# Project Settings
PROJECT_DATA_DIR = os.getenv("PROJECT_DATA_DIR", "./projects/data")
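# "./projects/data" is a relative path, so it resolves against the process's
# current working directory at runtime.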

# Tokenizer Settings
TOKENIZERS_PARALLELISM = os.getenv("TOKENIZERS_PARALLELISM", "true")
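# If this is meant for the Hugging Face tokenizers library, note that the
# library reads TOKENIZERS_PARALLELISM from the process environment, so the
# value only takes effect if it is also exported (e.g. via os.environ).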

# Embedding Model Settings
SENTENCE_TRANSFORMER_MODEL = os.getenv("SENTENCE_TRANSFORMER_MODEL", "TaylorAI/gte-tiny")
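# SENTENCE_TRANSFORMER_MODEL is presumably the model id handed to
# sentence_transformers.SentenceTransformer(); TaylorAI/gte-tiny is a small
# general-purpose embedding model.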

# Tool Output Length Control Settings
TOOL_OUTPUT_MAX_LENGTH = int(SUMMARIZATION_MAX_TOKENS / 4)
TOOL_OUTPUT_TRUNCATION_STRATEGY = os.getenv("TOOL_OUTPUT_TRUNCATION_STRATEGY", "smart")
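# TOOL_OUTPUT_MAX_LENGTH divides the token budget by 4, presumably using the
# rough heuristic of ~4 characters per token, so the cap is expressed in
# characters. What the "smart" strategy does is presumably defined in the code
# that performs the truncation.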

# Thinking Settings
DEFAULT_THINKING_ENABLE = os.getenv("DEFAULT_THINKING_ENABLE", "true").lower() == "true"
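
# Minimal sanity check (illustrative only; assumes this module can be run on
# its own): execute the file directly to print the derived budgets.
if __name__ == "__main__":
    print(f"Summarization input budget: {SUMMARIZATION_MAX_TOKENS} tokens")
    print(f"Tool output cap: {TOOL_OUTPUT_MAX_LENGTH} characters")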