diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index ce7ad7db98d5f9..f810584f24115c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -81,7 +81,7 @@ Dify requires the following dependencies to build, make sure they're installed o Dify is composed of a backend and a frontend. Navigate to the backend directory by `cd api/`, then follow the [Backend README](api/README.md) to install it. In a separate terminal, navigate to the frontend directory by `cd web/`, then follow the [Frontend README](web/README.md) to install. -Check the [installation FAQ](https://docs.dify.ai/getting-started/faq/install-faq) for a list of common issues and steps to troubleshoot. +Check the [installation FAQ](https://docs.dify.ai/learn-more/faq/self-host-faq) for a list of common issues and steps to troubleshoot. ### 5. Visit dify in your browser diff --git a/CONTRIBUTING_CN.md b/CONTRIBUTING_CN.md index 08fd34e117ba61..303c2513f53b9b 100644 --- a/CONTRIBUTING_CN.md +++ b/CONTRIBUTING_CN.md @@ -77,7 +77,7 @@ Dify 依赖以下工具和库: Dify 由后端和前端组成。通过 `cd api/` 导航到后端目录,然后按照 [后端 README](api/README.md) 进行安装。在另一个终端中,通过 `cd web/` 导航到前端目录,然后按照 [前端 README](web/README.md) 进行安装。 -查看 [安装常见问题解答](https://docs.dify.ai/getting-started/faq/install-faq) 以获取常见问题列表和故障排除步骤。 +查看 [安装常见问题解答](https://docs.dify.ai/v/zh-hans/learn-more/faq/install-faq) 以获取常见问题列表和故障排除步骤。 ### 5. 在浏览器中访问 Dify diff --git a/CONTRIBUTING_JA.md b/CONTRIBUTING_JA.md index e8f5456a3c44b7..1ce8436a7870d1 100644 --- a/CONTRIBUTING_JA.md +++ b/CONTRIBUTING_JA.md @@ -82,7 +82,7 @@ Dify はバックエンドとフロントエンドから構成されています まず`cd api/`でバックエンドのディレクトリに移動し、[Backend README](api/README.md)に従ってインストールします。 次に別のターミナルで、`cd web/`でフロントエンドのディレクトリに移動し、[Frontend README](web/README.md)に従ってインストールしてください。 -よくある問題とトラブルシューティングの手順については、[installation FAQ](https://docs.dify.ai/getting-started/faq/install-faq) を確認してください。 +よくある問題とトラブルシューティングの手順については、[installation FAQ](https://docs.dify.ai/v/japanese/learn-more/faq/install-faq) を確認してください。 ### 5. 
ブラウザで dify にアクセスする diff --git a/api/controllers/console/datasets/datasets.py b/api/controllers/console/datasets/datasets.py index 9166372df50517..934b6413aeeb91 100644 --- a/api/controllers/console/datasets/datasets.py +++ b/api/controllers/console/datasets/datasets.py @@ -1,10 +1,11 @@ import flask_restful -from flask import current_app, request +from flask import request from flask_login import current_user from flask_restful import Resource, marshal, marshal_with, reqparse from werkzeug.exceptions import Forbidden, NotFound import services +from configs import dify_config from controllers.console import api from controllers.console.apikey import api_key_fields, api_key_list from controllers.console.app.error import ProviderNotInitializeError @@ -530,7 +531,7 @@ class DatasetApiBaseUrlApi(Resource): @account_initialization_required def get(self): return { - 'api_base_url': (current_app.config['SERVICE_API_URL'] if current_app.config['SERVICE_API_URL'] + 'api_base_url': (dify_config.SERVICE_API_URL if dify_config.SERVICE_API_URL else request.host_url.rstrip('/')) + '/v1' } @@ -540,7 +541,7 @@ class DatasetRetrievalSettingApi(Resource): @login_required @account_initialization_required def get(self): - vector_type = current_app.config['VECTOR_STORE'] + vector_type = dify_config.VECTOR_STORE match vector_type: case VectorType.MILVUS | VectorType.RELYT | VectorType.PGVECTOR | VectorType.TIDB_VECTOR | VectorType.CHROMA | VectorType.TENCENT | VectorType.ORACLE: return { diff --git a/api/controllers/console/datasets/file.py b/api/controllers/console/datasets/file.py index c13bd45abb02fe..3b2083bcc3351c 100644 --- a/api/controllers/console/datasets/file.py +++ b/api/controllers/console/datasets/file.py @@ -1,8 +1,9 @@ -from flask import current_app, request +from flask import request from flask_login import current_user from flask_restful import Resource, marshal_with import services +from configs import dify_config from controllers.console import api from controllers.console.datasets.error import ( FileTooLargeError, @@ -26,9 +27,9 @@ class FileApi(Resource): @account_initialization_required @marshal_with(upload_config_fields) def get(self): - file_size_limit = current_app.config.get("UPLOAD_FILE_SIZE_LIMIT") - batch_count_limit = current_app.config.get("UPLOAD_FILE_BATCH_LIMIT") - image_file_size_limit = current_app.config.get("UPLOAD_IMAGE_FILE_SIZE_LIMIT") + file_size_limit = dify_config.UPLOAD_FILE_SIZE_LIMIT + batch_count_limit = dify_config.UPLOAD_FILE_BATCH_LIMIT + image_file_size_limit = dify_config.UPLOAD_IMAGE_FILE_SIZE_LIMIT return { 'file_size_limit': file_size_limit, 'batch_count_limit': batch_count_limit, @@ -76,7 +77,7 @@ class FileSupportTypeApi(Resource): @login_required @account_initialization_required def get(self): - etl_type = current_app.config['ETL_TYPE'] + etl_type = dify_config.ETL_TYPE allowed_extensions = UNSTRUCTURED_ALLOWED_EXTENSIONS if etl_type == 'Unstructured' else ALLOWED_EXTENSIONS return {'allowed_extensions': allowed_extensions} diff --git a/api/controllers/console/explore/parameter.py b/api/controllers/console/explore/parameter.py index 45255edb3a6887..0a168d6306c83e 100644 --- a/api/controllers/console/explore/parameter.py +++ b/api/controllers/console/explore/parameter.py @@ -1,7 +1,7 @@ -from flask import current_app from flask_restful import fields, marshal_with +from configs import dify_config from controllers.console import api from controllers.console.app.error import AppUnavailableError from controllers.console.explore.wraps import 
InstalledAppResource @@ -78,7 +78,7 @@ def get(self, installed_app: InstalledApp): "transfer_methods": ["remote_url", "local_file"] }}), 'system_parameters': { - 'image_file_size_limit': current_app.config.get('UPLOAD_IMAGE_FILE_SIZE_LIMIT') + 'image_file_size_limit': dify_config.UPLOAD_IMAGE_FILE_SIZE_LIMIT } } diff --git a/api/controllers/console/init_validate.py b/api/controllers/console/init_validate.py index b319f706b4dc38..6feb1003a975f6 100644 --- a/api/controllers/console/init_validate.py +++ b/api/controllers/console/init_validate.py @@ -1,8 +1,9 @@ import os -from flask import current_app, session +from flask import session from flask_restful import Resource, reqparse +from configs import dify_config from libs.helper import str_len from models.model import DifySetup from services.account_service import TenantService @@ -40,7 +41,7 @@ def post(self): return {'result': 'success'}, 201 def get_init_validate_status(): - if current_app.config['EDITION'] == 'SELF_HOSTED': + if dify_config.EDITION == 'SELF_HOSTED': if os.environ.get('INIT_PASSWORD'): return session.get('is_init_validated') or DifySetup.query.first() diff --git a/api/controllers/console/setup.py b/api/controllers/console/setup.py index def50212a18b82..ef7cc6bc03ce3a 100644 --- a/api/controllers/console/setup.py +++ b/api/controllers/console/setup.py @@ -1,8 +1,9 @@ from functools import wraps -from flask import current_app, request +from flask import request from flask_restful import Resource, reqparse +from configs import dify_config from libs.helper import email, get_remote_ip, str_len from libs.password import valid_password from models.model import DifySetup @@ -17,7 +18,7 @@ class SetupApi(Resource): def get(self): - if current_app.config['EDITION'] == 'SELF_HOSTED': + if dify_config.EDITION == 'SELF_HOSTED': setup_status = get_setup_status() if setup_status: return { @@ -77,7 +78,7 @@ def decorated(*args, **kwargs): def get_setup_status(): - if current_app.config['EDITION'] == 'SELF_HOSTED': + if dify_config.EDITION == 'SELF_HOSTED': return DifySetup.query.first() else: return True diff --git a/api/controllers/console/version.py b/api/controllers/console/version.py index faf36c4f40020f..1fcf4bdc00e5b1 100644 --- a/api/controllers/console/version.py +++ b/api/controllers/console/version.py @@ -3,9 +3,10 @@ import logging import requests -from flask import current_app from flask_restful import Resource, reqparse +from configs import dify_config + from . 
import api @@ -15,16 +16,16 @@ def get(self): parser = reqparse.RequestParser() parser.add_argument('current_version', type=str, required=True, location='args') args = parser.parse_args() - check_update_url = current_app.config['CHECK_UPDATE_URL'] + check_update_url = dify_config.CHECK_UPDATE_URL result = { - 'version': current_app.config['CURRENT_VERSION'], + 'version': dify_config.CURRENT_VERSION, 'release_date': '', 'release_notes': '', 'can_auto_update': False, 'features': { - 'can_replace_logo': current_app.config['CAN_REPLACE_LOGO'], - 'model_load_balancing_enabled': current_app.config['MODEL_LB_ENABLED'] + 'can_replace_logo': dify_config.CAN_REPLACE_LOGO, + 'model_load_balancing_enabled': dify_config.MODEL_LB_ENABLED } } diff --git a/api/controllers/console/workspace/account.py b/api/controllers/console/workspace/account.py index 0b5c84c2a3d24d..1056d5eb62faf2 100644 --- a/api/controllers/console/workspace/account.py +++ b/api/controllers/console/workspace/account.py @@ -1,10 +1,11 @@ import datetime import pytz -from flask import current_app, request +from flask import request from flask_login import current_user from flask_restful import Resource, fields, marshal_with, reqparse +from configs import dify_config from constants.languages import supported_language from controllers.console import api from controllers.console.setup import setup_required @@ -36,7 +37,7 @@ def post(self): parser = reqparse.RequestParser() - if current_app.config['EDITION'] == 'CLOUD': + if dify_config.EDITION == 'CLOUD': parser.add_argument('invitation_code', type=str, location='json') parser.add_argument( @@ -45,7 +46,7 @@ def post(self): required=True, location='json') args = parser.parse_args() - if current_app.config['EDITION'] == 'CLOUD': + if dify_config.EDITION == 'CLOUD': if not args['invitation_code']: raise ValueError('invitation_code is required') diff --git a/api/controllers/console/workspace/members.py b/api/controllers/console/workspace/members.py index 0e756778ab0e85..34e9da384106a7 100644 --- a/api/controllers/console/workspace/members.py +++ b/api/controllers/console/workspace/members.py @@ -1,8 +1,8 @@ -from flask import current_app from flask_login import current_user from flask_restful import Resource, abort, marshal_with, reqparse import services +from configs import dify_config from controllers.console import api from controllers.console.setup import setup_required from controllers.console.wraps import account_initialization_required, cloud_edition_billing_resource_check @@ -48,7 +48,7 @@ def post(self): inviter = current_user invitation_results = [] - console_web_url = current_app.config.get("CONSOLE_WEB_URL") + console_web_url = dify_config.CONSOLE_WEB_URL for invitee_email in invitee_emails: try: token = RegisterService.invite_new_member(inviter.current_tenant, invitee_email, interface_language, role=invitee_role, inviter=inviter) diff --git a/api/controllers/console/workspace/tool_providers.py b/api/controllers/console/workspace/tool_providers.py index 6e3f78d4e2e55b..bafeabb08ae2c7 100644 --- a/api/controllers/console/workspace/tool_providers.py +++ b/api/controllers/console/workspace/tool_providers.py @@ -1,10 +1,11 @@ import io -from flask import current_app, send_file +from flask import send_file from flask_login import current_user from flask_restful import Resource, reqparse from werkzeug.exceptions import Forbidden +from configs import dify_config from controllers.console import api from controllers.console.setup import setup_required from controllers.console.wraps import 
account_initialization_required @@ -104,7 +105,7 @@ class ToolBuiltinProviderIconApi(Resource): @setup_required def get(self, provider): icon_bytes, mimetype = BuiltinToolManageService.get_builtin_tool_provider_icon(provider) - icon_cache_max_age = current_app.config.get('TOOL_ICON_CACHE_MAX_AGE') + icon_cache_max_age = dify_config.TOOL_ICON_CACHE_MAX_AGE return send_file(io.BytesIO(icon_bytes), mimetype=mimetype, max_age=icon_cache_max_age) class ToolApiProviderAddApi(Resource): diff --git a/api/controllers/console/wraps.py b/api/controllers/console/wraps.py index 7c8ad110784ee0..3baf69acfd2d5f 100644 --- a/api/controllers/console/wraps.py +++ b/api/controllers/console/wraps.py @@ -1,9 +1,10 @@ import json from functools import wraps -from flask import abort, current_app, request +from flask import abort, request from flask_login import current_user +from configs import dify_config from controllers.console.workspace.error import AccountNotInitializedError from services.feature_service import FeatureService from services.operation_service import OperationService @@ -26,7 +27,7 @@ def decorated(*args, **kwargs): def only_edition_cloud(view): @wraps(view) def decorated(*args, **kwargs): - if current_app.config['EDITION'] != 'CLOUD': + if dify_config.EDITION != 'CLOUD': abort(404) return view(*args, **kwargs) @@ -37,7 +38,7 @@ def decorated(*args, **kwargs): def only_edition_self_hosted(view): @wraps(view) def decorated(*args, **kwargs): - if current_app.config['EDITION'] != 'SELF_HOSTED': + if dify_config.EDITION != 'SELF_HOSTED': abort(404) return view(*args, **kwargs) diff --git a/api/core/agent/fc_agent_runner.py b/api/core/agent/fc_agent_runner.py index bec76e7a246d12..7019b5e39fdfb9 100644 --- a/api/core/agent/fc_agent_runner.py +++ b/api/core/agent/fc_agent_runner.py @@ -342,10 +342,14 @@ def extract_tool_calls(self, llm_result_chunk: LLMResultChunk) -> Union[None, li """ tool_calls = [] for prompt_message in llm_result_chunk.delta.message.tool_calls: + args = {} + if prompt_message.function.arguments != '': + args = json.loads(prompt_message.function.arguments) + tool_calls.append(( prompt_message.id, prompt_message.function.name, - json.loads(prompt_message.function.arguments), + args, )) return tool_calls @@ -359,10 +363,14 @@ def extract_blocking_tool_calls(self, llm_result: LLMResult) -> Union[None, list """ tool_calls = [] for prompt_message in llm_result.message.tool_calls: + args = {} + if prompt_message.function.arguments != '': + args = json.loads(prompt_message.function.arguments) + tool_calls.append(( prompt_message.id, prompt_message.function.name, - json.loads(prompt_message.function.arguments), + args, )) return tool_calls diff --git a/api/core/llm_generator/prompts.py b/api/core/llm_generator/prompts.py index 170a28432bc0ef..a1737f00c61624 100644 --- a/api/core/llm_generator/prompts.py +++ b/api/core/llm_generator/prompts.py @@ -64,6 +64,7 @@ SUGGESTED_QUESTIONS_AFTER_ANSWER_INSTRUCTION_PROMPT = ( "Please help me predict the three most likely questions that human would ask, " "and keeping each question under 20 characters.\n" + "MAKE SURE your output is the SAME language as the Assistant's latest response(if the main response is written in Chinese, then the language of your output must be using Chinese.)!\n" "The output must be an array in JSON format following the specified schema:\n" "[\"question1\",\"question2\",\"question3\"]\n" ) diff --git a/api/core/memory/token_buffer_memory.py b/api/core/memory/token_buffer_memory.py index 21f1965e93adac..b33d4dd7cb342c 100644 
--- a/api/core/memory/token_buffer_memory.py +++ b/api/core/memory/token_buffer_memory.py @@ -103,7 +103,7 @@ def get_history_prompt_messages(self, max_token_limit: int = 2000, if curr_message_tokens > max_token_limit: pruned_memory = [] - while curr_message_tokens > max_token_limit and prompt_messages: + while curr_message_tokens > max_token_limit and len(prompt_messages)>1: pruned_memory.append(prompt_messages.pop(0)) curr_message_tokens = self.model_instance.get_llm_num_tokens( prompt_messages diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-4o-mini-2024-07-18.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-4o-mini-2024-07-18.yaml index 7304a58affc6f0..6f23e0647d6eec 100644 --- a/api/core/model_runtime/model_providers/openai/llm/gpt-4o-mini-2024-07-18.yaml +++ b/api/core/model_runtime/model_providers/openai/llm/gpt-4o-mini-2024-07-18.yaml @@ -24,7 +24,7 @@ parameter_rules: use_template: max_tokens default: 512 min: 1 - max: 4096 + max: 16384 - name: response_format label: zh_Hans: 回复格式 diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-4o-mini.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-4o-mini.yaml index 053cf7d7c10f4e..b97fbf8aabcae4 100644 --- a/api/core/model_runtime/model_providers/openai/llm/gpt-4o-mini.yaml +++ b/api/core/model_runtime/model_providers/openai/llm/gpt-4o-mini.yaml @@ -24,7 +24,7 @@ parameter_rules: use_template: max_tokens default: 512 min: 1 - max: 4096 + max: 16384 - name: response_format label: zh_Hans: 回复格式 diff --git a/api/core/model_runtime/model_providers/openrouter/llm/_position.yaml b/api/core/model_runtime/model_providers/openrouter/llm/_position.yaml index 51131249e587fd..fd4ed1109dfff0 100644 --- a/api/core/model_runtime/model_providers/openrouter/llm/_position.yaml +++ b/api/core/model_runtime/model_providers/openrouter/llm/_position.yaml @@ -1,4 +1,5 @@ - openai/gpt-4o +- openai/gpt-4o-mini - openai/gpt-4 - openai/gpt-4-32k - openai/gpt-3.5-turbo diff --git a/api/core/model_runtime/model_providers/openrouter/llm/gpt-4o-mini.yaml b/api/core/model_runtime/model_providers/openrouter/llm/gpt-4o-mini.yaml new file mode 100644 index 00000000000000..de0bad413653eb --- /dev/null +++ b/api/core/model_runtime/model_providers/openrouter/llm/gpt-4o-mini.yaml @@ -0,0 +1,43 @@ +model: openai/gpt-4o-mini +label: + en_US: gpt-4o-mini +model_type: llm +features: + - multi-tool-call + - agent-thought + - stream-tool-call + - vision +model_properties: + mode: chat + context_size: 128000 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: presence_penalty + use_template: presence_penalty + - name: frequency_penalty + use_template: frequency_penalty + - name: max_tokens + use_template: max_tokens + default: 512 + min: 1 + max: 16384 + - name: response_format + label: + zh_Hans: 回复格式 + en_US: response_format + type: string + help: + zh_Hans: 指定模型必须输出的格式 + en_US: specifying the format that the model must output + required: false + options: + - text + - json_object +pricing: + input: "0.15" + output: "0.60" + unit: "0.000001" + currency: USD diff --git a/api/core/model_runtime/model_providers/wenxin/llm/ernie-3.5-8k-0205.yaml b/api/core/model_runtime/model_providers/wenxin/llm/ernie-3.5-8k-0205.yaml index 34f73dccbba3e9..b308abcb323f3d 100644 --- a/api/core/model_runtime/model_providers/wenxin/llm/ernie-3.5-8k-0205.yaml +++ b/api/core/model_runtime/model_providers/wenxin/llm/ernie-3.5-8k-0205.yaml @@ -35,3 +35,4 @@ parameter_rules: zh_Hans: 
禁用模型自行进行外部搜索。 en_US: Disable the model to perform external search. required: false +deprecated: true diff --git a/api/core/model_runtime/model_providers/wenxin/llm/ernie-4.0-turbo-8k-preview b/api/core/model_runtime/model_providers/wenxin/llm/ernie-4.0-turbo-8k-preview.yaml similarity index 100% rename from api/core/model_runtime/model_providers/wenxin/llm/ernie-4.0-turbo-8k-preview rename to api/core/model_runtime/model_providers/wenxin/llm/ernie-4.0-turbo-8k-preview.yaml diff --git a/api/core/model_runtime/model_providers/wenxin/llm/ernie-4.0-turbo-8k.yaml b/api/core/model_runtime/model_providers/wenxin/llm/ernie-4.0-turbo-8k.yaml new file mode 100644 index 00000000000000..2887a510d05157 --- /dev/null +++ b/api/core/model_runtime/model_providers/wenxin/llm/ernie-4.0-turbo-8k.yaml @@ -0,0 +1,40 @@ +model: ernie-4.0-turbo-8k +label: + en_US: Ernie-4.0-turbo-8K +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 8192 +parameter_rules: + - name: temperature + use_template: temperature + min: 0.1 + max: 1.0 + default: 0.8 + - name: top_p + use_template: top_p + - name: max_tokens + use_template: max_tokens + default: 1024 + min: 2 + max: 2048 + - name: presence_penalty + use_template: presence_penalty + default: 1.0 + min: 1.0 + max: 2.0 + - name: frequency_penalty + use_template: frequency_penalty + - name: response_format + use_template: response_format + - name: disable_search + label: + zh_Hans: 禁用搜索 + en_US: Disable Search + type: boolean + help: + zh_Hans: 禁用模型自行进行外部搜索。 + en_US: Disable the model to perform external search. + required: false diff --git a/api/core/model_runtime/model_providers/wenxin/llm/ernie-character-8k-0321.yaml b/api/core/model_runtime/model_providers/wenxin/llm/ernie-character-8k-0321.yaml index 52e1dc832d9ad3..74451ff9e356e6 100644 --- a/api/core/model_runtime/model_providers/wenxin/llm/ernie-character-8k-0321.yaml +++ b/api/core/model_runtime/model_providers/wenxin/llm/ernie-character-8k-0321.yaml @@ -28,3 +28,4 @@ parameter_rules: default: 1.0 min: 1.0 max: 2.0 +deprecated: true diff --git a/api/core/model_runtime/model_providers/wenxin/llm/ernie-character-8k.yaml b/api/core/model_runtime/model_providers/wenxin/llm/ernie-character-8k.yaml new file mode 100644 index 00000000000000..4b11b3e895be9f --- /dev/null +++ b/api/core/model_runtime/model_providers/wenxin/llm/ernie-character-8k.yaml @@ -0,0 +1,30 @@ +model: ernie-character-8k-0321 +label: + en_US: ERNIE-Character-8K +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 8192 +parameter_rules: + - name: temperature + use_template: temperature + min: 0.1 + max: 1.0 + default: 0.95 + - name: top_p + use_template: top_p + min: 0 + max: 1.0 + default: 0.7 + - name: max_tokens + use_template: max_tokens + default: 1024 + min: 2 + max: 1024 + - name: presence_penalty + use_template: presence_penalty + default: 1.0 + min: 1.0 + max: 2.0 diff --git a/api/core/model_runtime/model_providers/wenxin/llm/ernie-lite-8k-0308.yaml b/api/core/model_runtime/model_providers/wenxin/llm/ernie-lite-8k-0308.yaml index 78325c1d6432e4..97ecb03f87b623 100644 --- a/api/core/model_runtime/model_providers/wenxin/llm/ernie-lite-8k-0308.yaml +++ b/api/core/model_runtime/model_providers/wenxin/llm/ernie-lite-8k-0308.yaml @@ -28,3 +28,4 @@ parameter_rules: default: 1.0 min: 1.0 max: 2.0 +deprecated: true diff --git a/api/core/model_runtime/model_providers/wenxin/llm/ernie-lite-8k-0922.yaml 
b/api/core/model_runtime/model_providers/wenxin/llm/ernie-lite-8k-0922.yaml index ebb47417cc94e1..7410ce51df00ef 100644 --- a/api/core/model_runtime/model_providers/wenxin/llm/ernie-lite-8k-0922.yaml +++ b/api/core/model_runtime/model_providers/wenxin/llm/ernie-lite-8k-0922.yaml @@ -28,3 +28,4 @@ parameter_rules: default: 1.0 min: 1.0 max: 2.0 +deprecated: true diff --git a/api/core/model_runtime/model_providers/wenxin/llm/ernie_bot.py b/api/core/model_runtime/model_providers/wenxin/llm/ernie_bot.py index 9aeab04cd229ca..bc7f29cf6ea538 100644 --- a/api/core/model_runtime/model_providers/wenxin/llm/ernie_bot.py +++ b/api/core/model_runtime/model_providers/wenxin/llm/ernie_bot.py @@ -97,6 +97,7 @@ def get_access_token(api_key: str, secret_key: str) -> 'BaiduAccessToken': baidu_access_tokens_lock.release() return token + class ErnieMessage: class Role(Enum): USER = 'user' @@ -137,7 +138,9 @@ class ErnieBotModel: 'ernie-speed-appbuilder': 'https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ai_apaas', 'ernie-lite-8k-0922': 'https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/eb-instant', 'ernie-lite-8k-0308': 'https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-lite-8k', + 'ernie-character-8k': 'https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-char-8k', 'ernie-character-8k-0321': 'https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-char-8k', + 'ernie-4.0-tutbo-8k': 'https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-4.0-turbo-8k', 'ernie-4.0-tutbo-8k-preview': 'https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-4.0-turbo-8k-preview', } @@ -149,7 +152,8 @@ class ErnieBotModel: 'ernie-3.5-8k-1222', 'ernie-3.5-4k-0205', 'ernie-3.5-128k', - 'ernie-4.0-8k' + 'ernie-4.0-8k', + 'ernie-4.0-turbo-8k', 'ernie-4.0-turbo-8k-preview' ] diff --git a/api/core/rag/datasource/vdb/analyticdb/analyticdb_vector.py b/api/core/rag/datasource/vdb/analyticdb/analyticdb_vector.py index 123b93fcd5aa2d..442d71293f632d 100644 --- a/api/core/rag/datasource/vdb/analyticdb/analyticdb_vector.py +++ b/api/core/rag/datasource/vdb/analyticdb/analyticdb_vector.py @@ -293,15 +293,18 @@ def search_by_full_text(self, query: str, **kwargs: Any) -> list[Document]: return documents def delete(self) -> None: - from alibabacloud_gpdb20160503 import models as gpdb_20160503_models - request = gpdb_20160503_models.DeleteCollectionRequest( - collection=self._collection_name, - dbinstance_id=self.config.instance_id, - namespace=self.config.namespace, - namespace_password=self.config.namespace_password, - region_id=self.config.region_id, - ) - self._client.delete_collection(request) + try: + from alibabacloud_gpdb20160503 import models as gpdb_20160503_models + request = gpdb_20160503_models.DeleteCollectionRequest( + collection=self._collection_name, + dbinstance_id=self.config.instance_id, + namespace=self.config.namespace, + namespace_password=self.config.namespace_password, + region_id=self.config.region_id, + ) + self._client.delete_collection(request) + except Exception as e: + raise e class AnalyticdbVectorFactory(AbstractVectorFactory): def init_vector(self, dataset: Dataset, attributes: list, embeddings: Embeddings): diff --git a/api/core/rag/datasource/vdb/tencent/tencent_vector.py b/api/core/rag/datasource/vdb/tencent/tencent_vector.py index cdcc22aec99367..3325a1028ece52 100644 --- a/api/core/rag/datasource/vdb/tencent/tencent_vector.py +++ b/api/core/rag/datasource/vdb/tencent/tencent_vector.py @@ 
-198,8 +198,6 @@ def delete(self) -> None: self._db.drop_collection(name=self._collection_name) - - class TencentVectorFactory(AbstractVectorFactory): def init_vector(self, dataset: Dataset, attributes: list, embeddings: Embeddings) -> TencentVector: diff --git a/api/core/rag/datasource/vdb/vector_base.py b/api/core/rag/datasource/vdb/vector_base.py index dbd8b6284bfc78..17768ab042dd93 100644 --- a/api/core/rag/datasource/vdb/vector_base.py +++ b/api/core/rag/datasource/vdb/vector_base.py @@ -67,3 +67,7 @@ def _filter_duplicate_texts(self, texts: list[Document]) -> list[Document]: def _get_uuids(self, texts: list[Document]) -> list[str]: return [text.metadata['doc_id'] for text in texts] + + @property + def collection_name(self): + return self._collection_name diff --git a/api/core/rag/datasource/vdb/vector_factory.py b/api/core/rag/datasource/vdb/vector_factory.py index 949a4b58471a07..256abd28afbe53 100644 --- a/api/core/rag/datasource/vdb/vector_factory.py +++ b/api/core/rag/datasource/vdb/vector_factory.py @@ -9,6 +9,7 @@ from core.rag.datasource.vdb.vector_base import BaseVector from core.rag.datasource.vdb.vector_type import VectorType from core.rag.models.document import Document +from extensions.ext_redis import redis_client from models.dataset import Dataset @@ -134,6 +135,10 @@ def search_by_full_text( def delete(self) -> None: self._vector_processor.delete() + # delete collection redis cache + if self._vector_processor.collection_name: + collection_exist_cache_key = 'vector_indexing_{}'.format(self._vector_processor.collection_name) + redis_client.delete(collection_exist_cache_key) def _get_embeddings(self) -> Embeddings: model_manager = ModelManager() diff --git a/api/core/tools/provider/builtin/getimgai/_assets/icon.svg b/api/core/tools/provider/builtin/getimgai/_assets/icon.svg new file mode 100644 index 00000000000000..6b2513386da458 --- /dev/null +++ b/api/core/tools/provider/builtin/getimgai/_assets/icon.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/api/core/tools/provider/builtin/getimgai/getimgai.py b/api/core/tools/provider/builtin/getimgai/getimgai.py new file mode 100644 index 00000000000000..c81d5fa333cd5d --- /dev/null +++ b/api/core/tools/provider/builtin/getimgai/getimgai.py @@ -0,0 +1,22 @@ +from core.tools.errors import ToolProviderCredentialValidationError +from core.tools.provider.builtin.getimgai.tools.text2image import Text2ImageTool +from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController + + +class GetImgAIProvider(BuiltinToolProviderController): + def _validate_credentials(self, credentials: dict) -> None: + try: + # Example validation using the text2image tool + Text2ImageTool().fork_tool_runtime( + runtime={"credentials": credentials} + ).invoke( + user_id='', + tool_parameters={ + "prompt": "A fire egg", + "response_format": "url", + "style": "photorealism", + } + ) + except Exception as e: + raise ToolProviderCredentialValidationError(str(e)) + \ No newline at end of file diff --git a/api/core/tools/provider/builtin/getimgai/getimgai.yaml b/api/core/tools/provider/builtin/getimgai/getimgai.yaml new file mode 100644 index 00000000000000..c9db0a9e22a6c4 --- /dev/null +++ b/api/core/tools/provider/builtin/getimgai/getimgai.yaml @@ -0,0 +1,29 @@ +identity: + author: Matri Qi + name: getimgai + label: + en_US: getimg.ai + zh_CN: getimg.ai + description: + en_US: GetImg API integration for image generation and scraping. 
+ icon: icon.svg + tags: + - image +credentials_for_provider: + getimg_api_key: + type: secret-input + required: true + label: + en_US: getimg.ai API Key + placeholder: + en_US: Please input your getimg.ai API key + help: + en_US: Get your getimg.ai API key from your getimg.ai account settings. If you are using a self-hosted version, you may enter any key at your convenience. + url: https://dashboard.getimg.ai/api-keys + base_url: + type: text-input + required: false + label: + en_US: getimg.ai server's Base URL + placeholder: + en_US: https://api.getimg.ai/v1 diff --git a/api/core/tools/provider/builtin/getimgai/getimgai_appx.py b/api/core/tools/provider/builtin/getimgai/getimgai_appx.py new file mode 100644 index 00000000000000..e28c57649cac4c --- /dev/null +++ b/api/core/tools/provider/builtin/getimgai/getimgai_appx.py @@ -0,0 +1,59 @@ +import logging +import time +from collections.abc import Mapping +from typing import Any + +import requests +from requests.exceptions import HTTPError + +logger = logging.getLogger(__name__) + +class GetImgAIApp: + def __init__(self, api_key: str | None = None, base_url: str | None = None): + self.api_key = api_key + self.base_url = base_url or 'https://api.getimg.ai/v1' + if not self.api_key: + raise ValueError("API key is required") + + def _prepare_headers(self): + headers = { + 'Content-Type': 'application/json', + 'Authorization': f'Bearer {self.api_key}' + } + return headers + + def _request( + self, + method: str, + url: str, + data: Mapping[str, Any] | None = None, + headers: Mapping[str, str] | None = None, + retries: int = 3, + backoff_factor: float = 0.3, + ) -> Mapping[str, Any] | None: + for i in range(retries): + try: + response = requests.request(method, url, json=data, headers=headers) + response.raise_for_status() + return response.json() + except requests.exceptions.RequestException as e: + if i < retries - 1 and isinstance(e, HTTPError) and e.response.status_code >= 500: + time.sleep(backoff_factor * (2 ** i)) + else: + raise + return None + + def text2image( + self, mode: str, **kwargs + ): + data = kwargs['params'] + if not data.get('prompt'): + raise ValueError("Prompt is required") + + endpoint = f'{self.base_url}/{mode}/text-to-image' + headers = self._prepare_headers() + logger.debug(f"Send request to {endpoint=} body={data}") + response = self._request('POST', endpoint, data, headers) + if response is None: + raise HTTPError("Failed to initiate getimg.ai after multiple retries") + return response diff --git a/api/core/tools/provider/builtin/getimgai/tools/text2image.py b/api/core/tools/provider/builtin/getimgai/tools/text2image.py new file mode 100644 index 00000000000000..dad7314479a89d --- /dev/null +++ b/api/core/tools/provider/builtin/getimgai/tools/text2image.py @@ -0,0 +1,39 @@ +import json +from typing import Any, Union + +from core.tools.entities.tool_entities import ToolInvokeMessage +from core.tools.provider.builtin.getimgai.getimgai_appx import GetImgAIApp +from core.tools.tool.builtin_tool import BuiltinTool + + +class Text2ImageTool(BuiltinTool): + def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]: + app = GetImgAIApp(api_key=self.runtime.credentials['getimg_api_key'], base_url=self.runtime.credentials['base_url']) + + options = { + 'style': tool_parameters.get('style'), + 'prompt': tool_parameters.get('prompt'), + 'aspect_ratio': tool_parameters.get('aspect_ratio'), + 'output_format': tool_parameters.get('output_format', 'jpeg'), + 
'response_format': tool_parameters.get('response_format', 'url'), + 'width': tool_parameters.get('width'), + 'height': tool_parameters.get('height'), + 'steps': tool_parameters.get('steps'), + 'negative_prompt': tool_parameters.get('negative_prompt'), + 'prompt_2': tool_parameters.get('prompt_2'), + } + options = {k: v for k, v in options.items() if v} + + text2image_result = app.text2image( + mode=tool_parameters.get('mode', 'essential-v2'), + params=options, + wait=True + ) + + if not isinstance(text2image_result, str): + text2image_result = json.dumps(text2image_result, ensure_ascii=False, indent=4) + + if not text2image_result: + return self.create_text_message("getimg.ai request failed.") + + return self.create_text_message(text2image_result) diff --git a/api/core/tools/provider/builtin/getimgai/tools/text2image.yaml b/api/core/tools/provider/builtin/getimgai/tools/text2image.yaml new file mode 100644 index 00000000000000..d972186f56d6a6 --- /dev/null +++ b/api/core/tools/provider/builtin/getimgai/tools/text2image.yaml @@ -0,0 +1,167 @@ +identity: + name: text2image + author: Matri Qi + label: + en_US: text2image + icon: icon.svg +description: + human: + en_US: Generate image via getimg.ai. + llm: This tool is used to generate image from prompt or image via https://getimg.ai. +parameters: + - name: prompt + type: string + required: true + label: + en_US: prompt + human_description: + en_US: The text prompt used to generate the image. The getimg.aier will generate an image based on this prompt. + llm_description: this prompt text will be used to generate image. + form: llm + - name: mode + type: select + required: false + label: + en_US: mode + human_description: + en_US: The getimg.ai mode to use. The mode determines the endpoint used to generate the image. + form: form + options: + - value: "essential-v2" + label: + en_US: essential-v2 + - value: stable-diffusion-xl + label: + en_US: stable-diffusion-xl + - value: stable-diffusion + label: + en_US: stable-diffusion + - value: latent-consistency + label: + en_US: latent-consistency + - name: style + type: select + required: false + label: + en_US: style + human_description: + en_US: The style preset to use. The style preset guides the generation towards a particular style. It's just efficient for `Essential V2` mode. + form: form + options: + - value: photorealism + label: + en_US: photorealism + - value: anime + label: + en_US: anime + - value: art + label: + en_US: art + - name: aspect_ratio + type: select + required: false + label: + en_US: "aspect ratio" + human_description: + en_US: The aspect ratio of the generated image. It's just efficient for `Essential V2` mode. + form: form + options: + - value: "1:1" + label: + en_US: "1:1" + - value: "4:5" + label: + en_US: "4:5" + - value: "5:4" + label: + en_US: "5:4" + - value: "2:3" + label: + en_US: "2:3" + - value: "3:2" + label: + en_US: "3:2" + - value: "4:7" + label: + en_US: "4:7" + - value: "7:4" + label: + en_US: "7:4" + - name: output_format + type: select + required: false + label: + en_US: "output format" + human_description: + en_US: The file format of the generated image. + form: form + options: + - value: jpeg + label: + en_US: jpeg + - value: png + label: + en_US: png + - name: response_format + type: select + required: false + label: + en_US: "response format" + human_description: + en_US: The format in which the generated images are returned. Must be one of url or b64. URLs are only valid for 1 hour after the image has been generated. 
+ form: form + options: + - value: url + label: + en_US: url + - value: b64 + label: + en_US: b64 + - name: model + type: string + required: false + label: + en_US: model + human_description: + en_US: Model ID supported by this pipeline and family. It's just efficient for `Stable Diffusion XL`, `Stable Diffusion`, `Latent Consistency` mode. + form: form + - name: negative_prompt + type: string + required: false + label: + en_US: negative prompt + human_description: + en_US: Text input that will not guide the image generation. It's just efficient for `Stable Diffusion XL`, `Stable Diffusion`, `Latent Consistency` mode. + form: form + - name: prompt_2 + type: string + required: false + label: + en_US: prompt2 + human_description: + en_US: Prompt sent to second tokenizer and text encoder. If not defined, prompt is used in both text-encoders. It's just efficient for `Stable Diffusion XL` mode. + form: form + - name: width + type: number + required: false + label: + en_US: width + human_description: + en_US: The width of the generated image in pixels. Width needs to be multiple of 64. + form: form + - name: height + type: number + required: false + label: + en_US: height + human_description: + en_US: The height of the generated image in pixels. Height needs to be multiple of 64. + form: form + - name: steps + type: number + required: false + label: + en_US: steps + human_description: + en_US: The number of denoising steps. More steps usually can produce higher quality images, but take more time to generate. It's just efficient for `Stable Diffusion XL`, `Stable Diffusion`, `Latent Consistency` mode. + form: form diff --git a/api/migrations/versions/6e957a32015b_add_embedding_cache_created_at_index.py b/api/migrations/versions/6e957a32015b_add_embedding_cache_created_at_index.py new file mode 100644 index 00000000000000..7445f664cd75a1 --- /dev/null +++ b/api/migrations/versions/6e957a32015b_add_embedding_cache_created_at_index.py @@ -0,0 +1,32 @@ +"""add-embedding-cache-created_at_index + +Revision ID: 6e957a32015b +Revises: fecff1c3da27 +Create Date: 2024-07-19 17:21:34.414705 + +""" +from alembic import op + +import models as models + +# revision identifiers, used by Alembic. +revision = '6e957a32015b' +down_revision = 'fecff1c3da27' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table('embeddings', schema=None) as batch_op: + batch_op.create_index('created_at_idx', ['created_at'], unique=False) + + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table('embeddings', schema=None) as batch_op: + batch_op.drop_index('created_at_idx') + + # ### end Alembic commands ### diff --git a/api/migrations/versions/fecff1c3da27_remove_extra_tracing_app_config_table .py b/api/migrations/versions/fecff1c3da27_remove_extra_tracing_app_config_table .py new file mode 100644 index 00000000000000..271b2490de1055 --- /dev/null +++ b/api/migrations/versions/fecff1c3da27_remove_extra_tracing_app_config_table .py @@ -0,0 +1,54 @@ +"""remove extra tracing app config table and add idx_dataset_permissions_tenant_id + +Revision ID: fecff1c3da27 +Revises: 408176b91ad3 +Create Date: 2024-07-19 12:03:21.217463 + +""" +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic.
+revision = 'fecff1c3da27' +down_revision = '408176b91ad3' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_table('tracing_app_configs') + + with op.batch_alter_table('trace_app_config', schema=None) as batch_op: + batch_op.drop_index('tracing_app_config_app_id_idx') + + # idx_dataset_permissions_tenant_id + with op.batch_alter_table('dataset_permissions', schema=None) as batch_op: + batch_op.create_index('idx_dataset_permissions_tenant_id', ['tenant_id']) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_table( + 'tracing_app_configs', + sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), + sa.Column('app_id', postgresql.UUID(), nullable=False), + sa.Column('tracing_provider', sa.String(length=255), nullable=True), + sa.Column('tracing_config', postgresql.JSON(astext_type=sa.Text()), nullable=True), + sa.Column( + 'created_at', postgresql.TIMESTAMP(), server_default=sa.text('now()'), autoincrement=False, nullable=False + ), + sa.Column( + 'updated_at', postgresql.TIMESTAMP(), server_default=sa.text('now()'), autoincrement=False, nullable=False + ), + sa.PrimaryKeyConstraint('id', name='tracing_app_config_pkey') + ) + + with op.batch_alter_table('trace_app_config', schema=None) as batch_op: + batch_op.create_index('tracing_app_config_app_id_idx', ['app_id']) + + with op.batch_alter_table('dataset_permissions', schema=None) as batch_op: + batch_op.drop_index('idx_dataset_permissions_tenant_id') + # ### end Alembic commands ### diff --git a/api/models/dataset.py b/api/models/dataset.py index d0be005a154878..34dde2dcef737f 100644 --- a/api/models/dataset.py +++ b/api/models/dataset.py @@ -630,7 +630,8 @@ class Embedding(db.Model): __tablename__ = 'embeddings' __table_args__ = ( db.PrimaryKeyConstraint('id', name='embedding_pkey'), - db.UniqueConstraint('model_name', 'hash', 'provider_name', name='embedding_hash_idx') + db.UniqueConstraint('model_name', 'hash', 'provider_name', name='embedding_hash_idx'), + db.Index('created_at_idx', 'created_at') ) id = db.Column(StringUUID, primary_key=True, server_default=db.text('uuid_generate_v4()')) diff --git a/api/models/model.py b/api/models/model.py index 331bb91c29819f..396cd7ec6382c3 100644 --- a/api/models/model.py +++ b/api/models/model.py @@ -1383,7 +1383,7 @@ class TraceAppConfig(db.Model): __tablename__ = 'trace_app_config' __table_args__ = ( db.PrimaryKeyConstraint('id', name='tracing_app_config_pkey'), - db.Index('tracing_app_config_app_id_idx', 'app_id'), + db.Index('trace_app_config_app_id_idx', 'app_id'), ) id = db.Column(StringUUID, server_default=db.text('uuid_generate_v4()')) diff --git a/api/schedule/clean_embedding_cache_task.py b/api/schedule/clean_embedding_cache_task.py index f68c54600a6012..ccc1062266a02f 100644 --- a/api/schedule/clean_embedding_cache_task.py +++ b/api/schedule/clean_embedding_cache_task.py @@ -2,6 +2,7 @@ import time import click +from sqlalchemy import text from werkzeug.exceptions import NotFound import app @@ -18,12 +19,19 @@ def clean_embedding_cache_task(): thirty_days_ago = datetime.datetime.now() - datetime.timedelta(days=clean_days) while True: try: - embeddings = db.session.query(Embedding).filter(Embedding.created_at < thirty_days_ago) \ + embedding_ids = db.session.query(Embedding.id).filter(Embedding.created_at < thirty_days_ago) \ .order_by(Embedding.created_at.desc()).limit(100).all() + 
embedding_ids = [embedding_id[0] for embedding_id in embedding_ids] except NotFound: break - for embedding in embeddings: - db.session.delete(embedding) - db.session.commit() + if embedding_ids: + for embedding_id in embedding_ids: + db.session.execute(text( + "DELETE FROM embeddings WHERE id = :embedding_id" + ), {'embedding_id': embedding_id}) + + db.session.commit() + else: + break end_at = time.perf_counter() click.echo(click.style('Cleaned embedding cache from db success latency: {}'.format(end_at - start_at), fg='green')) diff --git a/api/services/dataset_service.py b/api/services/dataset_service.py index e7af975009e2a7..d5a54ba731ca6a 100644 --- a/api/services/dataset_service.py +++ b/api/services/dataset_service.py @@ -845,7 +845,7 @@ def save_document_with_dataset_id( 'only_main_content': website_info.get('only_main_content', False), 'mode': 'crawl', } - if url.length > 255: + if len(url) > 255: document_name = url[:200] + '...' else: document_name = url diff --git a/web/app/(commonLayout)/tools/page.tsx b/web/app/(commonLayout)/tools/page.tsx index 4e64d8c0dfe8d3..1b08d54ba3e424 100644 --- a/web/app/(commonLayout)/tools/page.tsx +++ b/web/app/(commonLayout)/tools/page.tsx @@ -12,15 +12,16 @@ const Layout: FC = () => { const { isCurrentWorkspaceDatasetOperator } = useAppContext() useEffect(() => { - document.title = `${t('tools.title')} - Dify` + if (typeof window !== 'undefined') + document.title = `${t('tools.title')} - Dify` if (isCurrentWorkspaceDatasetOperator) return router.replace('/datasets') - }, []) + }, [isCurrentWorkspaceDatasetOperator, router, t]) useEffect(() => { if (isCurrentWorkspaceDatasetOperator) return router.replace('/datasets') - }, [isCurrentWorkspaceDatasetOperator]) + }, [isCurrentWorkspaceDatasetOperator, router]) return } diff --git a/web/app/components/base/switch/index.tsx b/web/app/components/base/switch/index.tsx index 0b025ab38b9b6d..b3d60672048a55 100644 --- a/web/app/components/base/switch/index.tsx +++ b/web/app/components/base/switch/index.tsx @@ -47,8 +47,8 @@ const Switch = ({ onChange, size = 'lg', defaultValue = false, disabled = false, }} className={classNames( wrapStyle[size], - enabled ? 'bg-blue-600' : 'bg-gray-200', - 'relative inline-flex flex-shrink-0 cursor-pointer rounded-full border-2 border-transparent transition-colors duration-200 ease-in-out', + enabled ? 'bg-components-toggle-bg' : 'bg-components-toggle-bg-unchecked', + 'relative inline-flex flex-shrink-0 cursor-pointer rounded-[5px] border-2 border-transparent transition-colors duration-200 ease-in-out', disabled ? '!opacity-50 !cursor-not-allowed' : '', className, )} @@ -58,7 +58,7 @@ const Switch = ({ onChange, size = 'lg', defaultValue = false, disabled = false, className={classNames( circleStyle[size], enabled ? 
translateLeft[size] : 'translate-x-0', - 'pointer-events-none inline-block transform rounded-full bg-white shadow ring-0 transition duration-200 ease-in-out', + 'pointer-events-none inline-block transform rounded-[3px] bg-components-toggle-knob shadow ring-0 transition duration-200 ease-in-out', )} /> diff --git a/web/app/components/share/text-generation/result/index.tsx b/web/app/components/share/text-generation/result/index.tsx index f924f206f494b9..caa2f9183eddea 100644 --- a/web/app/components/share/text-generation/result/index.tsx +++ b/web/app/components/share/text-generation/result/index.tsx @@ -19,6 +19,7 @@ import { NodeRunningStatus, WorkflowRunningStatus } from '@/app/components/workf import type { WorkflowProcess } from '@/app/components/base/chat/types' import { sleep } from '@/utils' import type { SiteInfo } from '@/models/share' +import { TEXT_GENERATION_TIMEOUT_MS } from '@/config' export type IResultProps = { isWorkflow: boolean @@ -186,7 +187,7 @@ const Result: FC = ({ let isEnd = false let isTimeout = false; (async () => { - await sleep(1000 * 60) // 1min timeout + await sleep(TEXT_GENERATION_TIMEOUT_MS) if (!isEnd) { setRespondingFalse() onCompleted(getCompletionRes(), taskId, false) diff --git a/web/app/components/workflow/nodes/_base/components/field.tsx b/web/app/components/workflow/nodes/_base/components/field.tsx index 1301e9f2ed9201..344fc3d7086917 100644 --- a/web/app/components/workflow/nodes/_base/components/field.tsx +++ b/web/app/components/workflow/nodes/_base/components/field.tsx @@ -38,13 +38,13 @@ const Filed: FC = ({ onClick={() => supportFold && toggleFold()} className={cn('flex justify-between items-center', supportFold && 'cursor-pointer')}>
-
{title}
+
{title}
{tooltip && ( {tooltip}
}> - + )} @@ -52,7 +52,7 @@ const Filed: FC = ({
{operations &&
{operations}
} {supportFold && ( - + )}
diff --git a/web/app/components/workflow/nodes/_base/components/next-step/add.tsx b/web/app/components/workflow/nodes/_base/components/next-step/add.tsx index df90fa156b2ae7..0ab0c8e39e2f6e 100644 --- a/web/app/components/workflow/nodes/_base/components/next-step/add.tsx +++ b/web/app/components/workflow/nodes/_base/components/next-step/add.tsx @@ -51,23 +51,23 @@ const Add = ({ return (
{ branchName && (
-
{branchName.toLocaleUpperCase()}
+
{branchName.toLocaleUpperCase()}
) } -
+
{t('workflow.panel.selectNextStep')} diff --git a/web/app/components/workflow/nodes/_base/components/next-step/index.tsx b/web/app/components/workflow/nodes/_base/components/next-step/index.tsx index a6fd940f63effe..261eb3fac71d95 100644 --- a/web/app/components/workflow/nodes/_base/components/next-step/index.tsx +++ b/web/app/components/workflow/nodes/_base/components/next-step/index.tsx @@ -33,7 +33,7 @@ const NextStep = ({ return (
-
+
{ branchName && ( @@ -75,7 +75,7 @@ const Item = ({ toolIcon={toolIcon} className='shrink-0 mr-1.5' /> -
{data.title}
+
{data.title}
{ !nodesReadOnly && ( + - ) @@ -37,8 +37,8 @@ const Line = ({ ) } @@ -47,7 +47,7 @@ const Line = ({ y={index * 48 + 18 - 2} width={1} height={4} - fill='#98A2B3' + className='fill-divider-soild-alt' /> )) diff --git a/web/app/components/workflow/nodes/_base/components/output-vars.tsx b/web/app/components/workflow/nodes/_base/components/output-vars.tsx index 401a5d6a3e5b70..4b7f9fc12e1a51 100644 --- a/web/app/components/workflow/nodes/_base/components/output-vars.tsx +++ b/web/app/components/workflow/nodes/_base/components/output-vars.tsx @@ -3,8 +3,10 @@ import type { FC } from 'react' import React from 'react' import { useTranslation } from 'react-i18next' import { useBoolean } from 'ahooks' +import { + RiArrowDownSLine, +} from '@remixicon/react' import cn from '@/utils/classnames' -import { ChevronRight } from '@/app/components/base/icons/src/vender/line/arrows' type Props = { className?: string @@ -25,9 +27,9 @@ const OutputVars: FC = ({
+ className={cn(className, 'flex justify-between system-sm-semibold-uppercase text-text-secondary cursor-pointer')}>
{title || t('workflow.nodes.common.outputVars')}
- +
{!isFold && (
@@ -57,10 +59,10 @@ export const VarItem: FC = ({ return (
-
{name}
-
{type}
+
{name}
+
{type}
-
+
{description} {subItems && (
diff --git a/web/app/components/workflow/nodes/_base/node.tsx b/web/app/components/workflow/nodes/_base/node.tsx index 6d1e522e663c6f..2cef91f1b16a00 100644 --- a/web/app/components/workflow/nodes/_base/node.tsx +++ b/web/app/components/workflow/nodes/_base/node.tsx @@ -79,7 +79,7 @@ const BaseNode: FC = ({
= ({ className={cn( 'group relative pb-1 shadow-xs', 'border border-transparent rounded-[15px]', - data.type !== BlockEnum.Iteration && 'w-[240px] bg-[#fcfdff]', - data.type === BlockEnum.Iteration && 'flex flex-col w-full h-full bg-[#fcfdff]/80', + data.type !== BlockEnum.Iteration && 'w-[240px] bg-workflow-block-bg', + data.type === BlockEnum.Iteration && 'flex flex-col w-full h-full bg-workflow-block-bg/80', !data._runningStatus && 'hover:shadow-lg', showRunningBorder && '!border-primary-500', showSuccessBorder && '!border-[#12B76A]', @@ -156,7 +156,7 @@ const BaseNode: FC = ({ />
{data.title}
@@ -197,7 +197,7 @@ const BaseNode: FC = ({ } { data.desc && data.type !== BlockEnum.Iteration && ( -
+
{data.desc}
) diff --git a/web/app/components/workflow/nodes/_base/panel.tsx b/web/app/components/workflow/nodes/_base/panel.tsx index 83d05cbff8ffe7..269d8110dca0ae 100644 --- a/web/app/components/workflow/nodes/_base/panel.tsx +++ b/web/app/components/workflow/nodes/_base/panel.tsx @@ -98,21 +98,21 @@ const BasePanel: FC = ({ return (
-
+
-
+
= ({ handleSyncWorkflowDraft(true) }} > - +
) } -
+
handleNodeSelect(id, true)} > - +
@@ -166,10 +166,10 @@ const BasePanel: FC = ({ { !!availableNextBlocks.length && (
-
+
{t('workflow.panel.nextStep').toLocaleUpperCase()}
-
+
{t('workflow.panel.addNextStep')}
diff --git a/web/app/styles/globals.css b/web/app/styles/globals.css index 1c13e7eb5fb58b..9f23c3d473a1d6 100644 --- a/web/app/styles/globals.css +++ b/web/app/styles/globals.css @@ -276,6 +276,10 @@ button:focus-within { line-height: 24px; } +[class*='code-'] { + @apply font-mono; +} + .code-xs-regular { font-size: 12px; font-weight: 400; @@ -563,6 +567,7 @@ button:focus-within { font-weight: 700; line-height: 1.2; } + /* font define end */ /* border radius start */ @@ -625,6 +630,7 @@ button:focus-within { .radius-full { border-radius: 64px; } + /* border radius end */ .link { diff --git a/web/config/index.ts b/web/config/index.ts index 600becf68cadda..9bb4efcd5eba3e 100644 --- a/web/config/index.ts +++ b/web/config/index.ts @@ -245,3 +245,5 @@ Thought: {{agent_scratchpad}} } export const VAR_REGEX = /\{\{(#[a-zA-Z0-9_-]{1,50}(\.[a-zA-Z_][a-zA-Z0-9_]{0,29}){1,10}#)\}\}/gi + +export const TEXT_GENERATION_TIMEOUT_MS = 60000