diff --git a/.github/workflows/vdb-tests.yml b/.github/workflows/vdb-tests.yml index a5ba51ce0e5208..73af3700637121 100644 --- a/.github/workflows/vdb-tests.yml +++ b/.github/workflows/vdb-tests.yml @@ -8,6 +8,8 @@ on: - api/core/rag/datasource/** - docker/** - .github/workflows/vdb-tests.yml + - api/poetry.lock + - api/pyproject.toml concurrency: group: vdb-tests-${{ github.head_ref || github.run_id }} diff --git a/api/configs/packaging/__init__.py b/api/configs/packaging/__init__.py index 1f2b8224e8dcca..5d2a0231b0009d 100644 --- a/api/configs/packaging/__init__.py +++ b/api/configs/packaging/__init__.py @@ -9,7 +9,7 @@ class PackagingInfo(BaseSettings): CURRENT_VERSION: str = Field( description="Dify version", - default="0.11.2", + default="0.12.1", ) COMMIT_SHA: str = Field( diff --git a/api/controllers/console/app/app.py b/api/controllers/console/app/app.py index 9687b59cd1d048..da72b704c71bd7 100644 --- a/api/controllers/console/app/app.py +++ b/api/controllers/console/app/app.py @@ -190,7 +190,7 @@ def post(self, app_model): ) session.commit() - stmt = select(App).where(App.id == result.app.id) + stmt = select(App).where(App.id == result.app_id) app = session.scalar(stmt) return app, 201 diff --git a/api/core/app/apps/advanced_chat/app_generator.py b/api/core/app/apps/advanced_chat/app_generator.py index 00e5a747329bfb..ffe56ce4105c1e 100644 --- a/api/core/app/apps/advanced_chat/app_generator.py +++ b/api/core/app/apps/advanced_chat/app_generator.py @@ -127,7 +127,9 @@ def generate( conversation_id=conversation.id if conversation else None, inputs=conversation.inputs if conversation - else self._prepare_user_inputs(user_inputs=inputs, app_config=app_config), + else self._prepare_user_inputs( + user_inputs=inputs, variables=app_config.variables, tenant_id=app_model.tenant_id + ), query=query, files=file_objs, parent_message_id=args.get("parent_message_id") if invoke_from != InvokeFrom.SERVICE_API else UUID_NIL, diff --git a/api/core/app/apps/agent_chat/app_generator.py b/api/core/app/apps/agent_chat/app_generator.py index d1564a260e2a6d..48ee590e2f4e9f 100644 --- a/api/core/app/apps/agent_chat/app_generator.py +++ b/api/core/app/apps/agent_chat/app_generator.py @@ -134,7 +134,9 @@ def generate( conversation_id=conversation.id if conversation else None, inputs=conversation.inputs if conversation - else self._prepare_user_inputs(user_inputs=inputs, app_config=app_config), + else self._prepare_user_inputs( + user_inputs=inputs, variables=app_config.variables, tenant_id=app_model.tenant_id + ), query=query, files=file_objs, parent_message_id=args.get("parent_message_id") if invoke_from != InvokeFrom.SERVICE_API else UUID_NIL, diff --git a/api/core/app/apps/base_app_generator.py b/api/core/app/apps/base_app_generator.py index 2c78d957788d57..85b7aced557c5e 100644 --- a/api/core/app/apps/base_app_generator.py +++ b/api/core/app/apps/base_app_generator.py @@ -1,4 +1,4 @@ -from collections.abc import Mapping +from collections.abc import Mapping, Sequence from typing import TYPE_CHECKING, Any, Optional from core.app.app_config.entities import VariableEntityType @@ -6,7 +6,7 @@ from factories import file_factory if TYPE_CHECKING: - from core.app.app_config.entities import AppConfig, VariableEntity + from core.app.app_config.entities import VariableEntity class BaseAppGenerator: @@ -14,23 +14,23 @@ def _prepare_user_inputs( self, *, user_inputs: Optional[Mapping[str, Any]], - app_config: "AppConfig", + variables: Sequence["VariableEntity"], + tenant_id: str, ) -> Mapping[str, Any]: user_inputs 
= user_inputs or {} # Filter input variables from form configuration, handle required fields, default values, and option values - variables = app_config.variables user_inputs = { var.variable: self._validate_inputs(value=user_inputs.get(var.variable), variable_entity=var) for var in variables } user_inputs = {k: self._sanitize_value(v) for k, v in user_inputs.items()} # Convert files in inputs to File - entity_dictionary = {item.variable: item for item in app_config.variables} + entity_dictionary = {item.variable: item for item in variables} # Convert single file to File files_inputs = { k: file_factory.build_from_mapping( mapping=v, - tenant_id=app_config.tenant_id, + tenant_id=tenant_id, config=FileUploadConfig( allowed_file_types=entity_dictionary[k].allowed_file_types, allowed_file_extensions=entity_dictionary[k].allowed_file_extensions, @@ -44,7 +44,7 @@ def _prepare_user_inputs( file_list_inputs = { k: file_factory.build_from_mappings( mappings=v, - tenant_id=app_config.tenant_id, + tenant_id=tenant_id, config=FileUploadConfig( allowed_file_types=entity_dictionary[k].allowed_file_types, allowed_file_extensions=entity_dictionary[k].allowed_file_extensions, diff --git a/api/core/app/apps/chat/app_generator.py b/api/core/app/apps/chat/app_generator.py index e683dfef3f7f78..5b3efe12ebaece 100644 --- a/api/core/app/apps/chat/app_generator.py +++ b/api/core/app/apps/chat/app_generator.py @@ -132,7 +132,9 @@ def generate( conversation_id=conversation.id if conversation else None, inputs=conversation.inputs if conversation - else self._prepare_user_inputs(user_inputs=inputs, app_config=app_config), + else self._prepare_user_inputs( + user_inputs=inputs, variables=app_config.variables, tenant_id=app_model.tenant_id + ), query=query, files=file_objs, parent_message_id=args.get("parent_message_id") if invoke_from != InvokeFrom.SERVICE_API else UUID_NIL, diff --git a/api/core/app/apps/completion/app_generator.py b/api/core/app/apps/completion/app_generator.py index 22ee8b096762d3..e9e50015bd5ec5 100644 --- a/api/core/app/apps/completion/app_generator.py +++ b/api/core/app/apps/completion/app_generator.py @@ -113,7 +113,9 @@ def generate( app_config=app_config, model_conf=ModelConfigConverter.convert(app_config), file_upload_config=file_extra_config, - inputs=self._prepare_user_inputs(user_inputs=inputs, app_config=app_config), + inputs=self._prepare_user_inputs( + user_inputs=inputs, variables=app_config.variables, tenant_id=app_model.tenant_id + ), query=query, files=file_objs, user_id=user.id, diff --git a/api/core/app/apps/workflow/app_generator.py b/api/core/app/apps/workflow/app_generator.py index 65da39b220a7a7..31efe43412ea6b 100644 --- a/api/core/app/apps/workflow/app_generator.py +++ b/api/core/app/apps/workflow/app_generator.py @@ -96,7 +96,9 @@ def generate( task_id=str(uuid.uuid4()), app_config=app_config, file_upload_config=file_extra_config, - inputs=self._prepare_user_inputs(user_inputs=inputs, app_config=app_config), + inputs=self._prepare_user_inputs( + user_inputs=inputs, variables=app_config.variables, tenant_id=app_model.tenant_id + ), files=system_files, user_id=user.id, stream=stream, diff --git a/api/core/app/apps/workflow_app_runner.py b/api/core/app/apps/workflow_app_runner.py index 2872390d4662db..1cf72ae79e4146 100644 --- a/api/core/app/apps/workflow_app_runner.py +++ b/api/core/app/apps/workflow_app_runner.py @@ -43,7 +43,6 @@ ) from core.workflow.graph_engine.entities.graph import Graph from core.workflow.nodes import NodeType -from core.workflow.nodes.iteration 
import IterationNodeData from core.workflow.nodes.node_mapping import node_type_classes_mapping from core.workflow.workflow_entry import WorkflowEntry from extensions.ext_database import db @@ -160,8 +159,6 @@ def _get_graph_and_variable_pool_of_single_iteration( user_inputs=user_inputs, variable_pool=variable_pool, tenant_id=workflow.tenant_id, - node_type=node_type, - node_data=IterationNodeData(**iteration_node_config.get("data", {})), ) return graph, variable_pool diff --git a/api/core/app/task_pipeline/workflow_cycle_manage.py b/api/core/app/task_pipeline/workflow_cycle_manage.py index 9229cbcc0a7c8e..d45726af466538 100644 --- a/api/core/app/task_pipeline/workflow_cycle_manage.py +++ b/api/core/app/task_pipeline/workflow_cycle_manage.py @@ -3,6 +3,7 @@ from collections.abc import Mapping, Sequence from datetime import UTC, datetime from typing import Any, Optional, Union, cast +from uuid import uuid4 from sqlalchemy.orm import Session @@ -80,38 +81,38 @@ def _handle_workflow_run_start(self) -> WorkflowRun: inputs[f"sys.{key.value}"] = value - inputs = WorkflowEntry.handle_special_values(inputs) - triggered_from = ( WorkflowRunTriggeredFrom.DEBUGGING if self._application_generate_entity.invoke_from == InvokeFrom.DEBUGGER else WorkflowRunTriggeredFrom.APP_RUN ) + # handle special values + inputs = WorkflowEntry.handle_special_values(inputs) + # init workflow run - workflow_run = WorkflowRun() - workflow_run_id = self._workflow_system_variables[SystemVariableKey.WORKFLOW_RUN_ID] - if workflow_run_id: - workflow_run.id = workflow_run_id - workflow_run.tenant_id = self._workflow.tenant_id - workflow_run.app_id = self._workflow.app_id - workflow_run.sequence_number = new_sequence_number - workflow_run.workflow_id = self._workflow.id - workflow_run.type = self._workflow.type - workflow_run.triggered_from = triggered_from.value - workflow_run.version = self._workflow.version - workflow_run.graph = self._workflow.graph - workflow_run.inputs = json.dumps(inputs) - workflow_run.status = WorkflowRunStatus.RUNNING.value - workflow_run.created_by_role = ( - CreatedByRole.ACCOUNT.value if isinstance(self._user, Account) else CreatedByRole.END_USER.value - ) - workflow_run.created_by = self._user.id + with Session(db.engine, expire_on_commit=False) as session: + workflow_run = WorkflowRun() + system_id = self._workflow_system_variables[SystemVariableKey.WORKFLOW_RUN_ID] + workflow_run.id = system_id or str(uuid4()) + workflow_run.tenant_id = self._workflow.tenant_id + workflow_run.app_id = self._workflow.app_id + workflow_run.sequence_number = new_sequence_number + workflow_run.workflow_id = self._workflow.id + workflow_run.type = self._workflow.type + workflow_run.triggered_from = triggered_from.value + workflow_run.version = self._workflow.version + workflow_run.graph = self._workflow.graph + workflow_run.inputs = json.dumps(inputs) + workflow_run.status = WorkflowRunStatus.RUNNING + workflow_run.created_by_role = ( + CreatedByRole.ACCOUNT if isinstance(self._user, Account) else CreatedByRole.END_USER + ) + workflow_run.created_by = self._user.id + workflow_run.created_at = datetime.now(UTC).replace(tzinfo=None) - db.session.add(workflow_run) - db.session.commit() - db.session.refresh(workflow_run) - db.session.close() + session.add(workflow_run) + session.commit() return workflow_run diff --git a/api/core/model_runtime/model_providers/anthropic/llm/llm.py b/api/core/model_runtime/model_providers/anthropic/llm/llm.py index 79701e4ea4f547..b24324708b0067 100644 --- 
a/api/core/model_runtime/model_providers/anthropic/llm/llm.py +++ b/api/core/model_runtime/model_providers/anthropic/llm/llm.py @@ -453,7 +453,7 @@ def _to_credential_kwargs(self, credentials: dict) -> dict: return credentials_kwargs - def _convert_prompt_messages(self, prompt_messages: list[PromptMessage]) -> tuple[str, list[dict]]: + def _convert_prompt_messages(self, prompt_messages: Sequence[PromptMessage]) -> tuple[str, list[dict]]: """ Convert prompt messages to dict list and system """ @@ -461,7 +461,15 @@ def _convert_prompt_messages(self, prompt_messages: list[PromptMessage]) -> tupl first_loop = True for message in prompt_messages: if isinstance(message, SystemPromptMessage): - message.content = message.content.strip() + if isinstance(message.content, str): + message.content = message.content.strip() + elif isinstance(message.content, list): + # System prompt only supports text + message.content = "".join( + c.data.strip() for c in message.content if isinstance(c, TextPromptMessageContent) + ) + else: + raise ValueError(f"Unknown system prompt message content type {type(message.content)}") if first_loop: system = message.content first_loop = False diff --git a/api/core/model_runtime/model_providers/azure_openai/_constant.py b/api/core/model_runtime/model_providers/azure_openai/_constant.py index e61a9e0474b101..4cf58275d79fe3 100644 --- a/api/core/model_runtime/model_providers/azure_openai/_constant.py +++ b/api/core/model_runtime/model_providers/azure_openai/_constant.py @@ -779,7 +779,7 @@ class AzureBaseModel(BaseModel): name="frequency_penalty", **PARAMETER_RULE_TEMPLATE[DefaultParameterName.FREQUENCY_PENALTY], ), - _get_max_tokens(default=512, min_val=1, max_val=4096), + _get_max_tokens(default=512, min_val=1, max_val=16384), ParameterRule( name="seed", label=I18nObject(zh_Hans="种子", en_US="Seed"), diff --git a/api/core/model_runtime/model_providers/azure_openai/tts/tts.py b/api/core/model_runtime/model_providers/azure_openai/tts/tts.py index 133cc9f76e0720..173b9d250c1743 100644 --- a/api/core/model_runtime/model_providers/azure_openai/tts/tts.py +++ b/api/core/model_runtime/model_providers/azure_openai/tts/tts.py @@ -14,7 +14,7 @@ class AzureOpenAIText2SpeechModel(_CommonAzureOpenAI, TTSModel): """ - Model class for OpenAI Speech to text model. + Model class for OpenAI text2speech model.
""" def _invoke( diff --git a/api/core/model_runtime/model_providers/deepseek/llm/deepseek-chat.yaml b/api/core/model_runtime/model_providers/deepseek/llm/deepseek-chat.yaml index 4973ac8ad6981c..0bbd27ad746c44 100644 --- a/api/core/model_runtime/model_providers/deepseek/llm/deepseek-chat.yaml +++ b/api/core/model_runtime/model_providers/deepseek/llm/deepseek-chat.yaml @@ -5,6 +5,7 @@ label: model_type: llm features: - agent-thought + - tool-call - multi-tool-call - stream-tool-call model_properties: @@ -72,7 +73,7 @@ parameter_rules: - text - json_object pricing: - input: '1' - output: '2' - unit: '0.000001' + input: "1" + output: "2" + unit: "0.000001" currency: RMB diff --git a/api/core/model_runtime/model_providers/deepseek/llm/deepseek-coder.yaml b/api/core/model_runtime/model_providers/deepseek/llm/deepseek-coder.yaml index caafeadadd999e..97310e76b9a6f4 100644 --- a/api/core/model_runtime/model_providers/deepseek/llm/deepseek-coder.yaml +++ b/api/core/model_runtime/model_providers/deepseek/llm/deepseek-coder.yaml @@ -5,6 +5,7 @@ label: model_type: llm features: - agent-thought + - tool-call - multi-tool-call - stream-tool-call model_properties: diff --git a/api/core/model_runtime/model_providers/deepseek/llm/llm.py b/api/core/model_runtime/model_providers/deepseek/llm/llm.py index 6d0a3ee2628ea2..610dc7b4589e9d 100644 --- a/api/core/model_runtime/model_providers/deepseek/llm/llm.py +++ b/api/core/model_runtime/model_providers/deepseek/llm/llm.py @@ -1,18 +1,17 @@ from collections.abc import Generator from typing import Optional, Union -from urllib.parse import urlparse -import tiktoken +from yarl import URL -from core.model_runtime.entities.llm_entities import LLMResult +from core.model_runtime.entities.llm_entities import LLMMode, LLMResult from core.model_runtime.entities.message_entities import ( PromptMessage, PromptMessageTool, ) -from core.model_runtime.model_providers.openai.llm.llm import OpenAILargeLanguageModel +from core.model_runtime.model_providers.openai_api_compatible.llm.llm import OAIAPICompatLargeLanguageModel -class DeepSeekLargeLanguageModel(OpenAILargeLanguageModel): +class DeepseekLargeLanguageModel(OAIAPICompatLargeLanguageModel): def _invoke( self, model: str, @@ -25,92 +24,15 @@ def _invoke( user: Optional[str] = None, ) -> Union[LLMResult, Generator]: self._add_custom_parameters(credentials) - - return super()._invoke(model, credentials, prompt_messages, model_parameters, tools, stop, stream, user) + return super()._invoke(model, credentials, prompt_messages, model_parameters, tools, stop, stream) def validate_credentials(self, model: str, credentials: dict) -> None: self._add_custom_parameters(credentials) super().validate_credentials(model, credentials) - # refactored from openai model runtime, use cl100k_base for calculate token number - def _num_tokens_from_string(self, model: str, text: str, tools: Optional[list[PromptMessageTool]] = None) -> int: - """ - Calculate num tokens for text completion model with tiktoken package. 
- - :param model: model name - :param text: prompt text - :param tools: tools for tool calling - :return: number of tokens - """ - encoding = tiktoken.get_encoding("cl100k_base") - num_tokens = len(encoding.encode(text)) - - if tools: - num_tokens += self._num_tokens_for_tools(encoding, tools) - - return num_tokens - - # refactored from openai model runtime, use cl100k_base for calculate token number - def _num_tokens_from_messages( - self, model: str, messages: list[PromptMessage], tools: Optional[list[PromptMessageTool]] = None - ) -> int: - """Calculate num tokens for gpt-3.5-turbo and gpt-4 with tiktoken package. - - Official documentation: https://github.com/openai/openai-cookbook/blob/ - main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb""" - encoding = tiktoken.get_encoding("cl100k_base") - tokens_per_message = 3 - tokens_per_name = 1 - - num_tokens = 0 - messages_dict = [self._convert_prompt_message_to_dict(m) for m in messages] - for message in messages_dict: - num_tokens += tokens_per_message - for key, value in message.items(): - # Cast str(value) in case the message value is not a string - # This occurs with function messages - # TODO: The current token calculation method for the image type is not implemented, - # which need to download the image and then get the resolution for calculation, - # and will increase the request delay - if isinstance(value, list): - text = "" - for item in value: - if isinstance(item, dict) and item["type"] == "text": - text += item["text"] - - value = text - - if key == "tool_calls": - for tool_call in value: - for t_key, t_value in tool_call.items(): - num_tokens += len(encoding.encode(t_key)) - if t_key == "function": - for f_key, f_value in t_value.items(): - num_tokens += len(encoding.encode(f_key)) - num_tokens += len(encoding.encode(f_value)) - else: - num_tokens += len(encoding.encode(t_key)) - num_tokens += len(encoding.encode(t_value)) - else: - num_tokens += len(encoding.encode(str(value))) - - if key == "name": - num_tokens += tokens_per_name - - # every reply is primed with assistant - num_tokens += 3 - - if tools: - num_tokens += self._num_tokens_for_tools(encoding, tools) - - return num_tokens - @staticmethod - def _add_custom_parameters(credentials: dict) -> None: - credentials["mode"] = "chat" - credentials["openai_api_key"] = credentials["api_key"] - if "endpoint_url" not in credentials or credentials["endpoint_url"] == "": - credentials["openai_api_base"] = "https://api.deepseek.com" - else: - parsed_url = urlparse(credentials["endpoint_url"]) - credentials["openai_api_base"] = f"{parsed_url.scheme}://{parsed_url.netloc}" + def _add_custom_parameters(credentials) -> None: + credentials["endpoint_url"] = str(URL(credentials.get("endpoint_url", "https://api.deepseek.com"))) + credentials["mode"] = LLMMode.CHAT.value + credentials["function_calling_type"] = "tool_call" + credentials["stream_function_calling"] = "support" diff --git a/api/core/model_runtime/model_providers/gitee_ai/rerank/rerank.py b/api/core/model_runtime/model_providers/gitee_ai/rerank/rerank.py index 231345c2f4e231..832ba927406c4c 100644 --- a/api/core/model_runtime/model_providers/gitee_ai/rerank/rerank.py +++ b/api/core/model_runtime/model_providers/gitee_ai/rerank/rerank.py @@ -122,7 +122,7 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode label=I18nObject(en_US=model), model_type=ModelType.RERANK, fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_properties={ModelPropertyKey.CONTEXT_SIZE: 
int(credentials.get("context_size"))}, + model_properties={ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", 512))}, ) return entity diff --git a/api/core/model_runtime/model_providers/gitee_ai/tts/tts.py b/api/core/model_runtime/model_providers/gitee_ai/tts/tts.py index ed2bd5b13ddce4..36dcea405d0974 100644 --- a/api/core/model_runtime/model_providers/gitee_ai/tts/tts.py +++ b/api/core/model_runtime/model_providers/gitee_ai/tts/tts.py @@ -10,7 +10,7 @@ class GiteeAIText2SpeechModel(_CommonGiteeAI, TTSModel): """ - Model class for OpenAI Speech to text model. + Model class for OpenAI text2speech model. """ def _invoke( diff --git a/api/core/model_runtime/model_providers/gpustack/rerank/rerank.py b/api/core/model_runtime/model_providers/gpustack/rerank/rerank.py index 5ea7532564098d..feb57770285e4e 100644 --- a/api/core/model_runtime/model_providers/gpustack/rerank/rerank.py +++ b/api/core/model_runtime/model_providers/gpustack/rerank/rerank.py @@ -140,7 +140,7 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode label=I18nObject(en_US=model), model_type=ModelType.RERANK, fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_properties={ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size"))}, + model_properties={ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", 512))}, ) return entity diff --git a/api/core/model_runtime/model_providers/jina/rerank/rerank.py b/api/core/model_runtime/model_providers/jina/rerank/rerank.py index aacc8e75d3ad07..22f882be6bdc4b 100644 --- a/api/core/model_runtime/model_providers/jina/rerank/rerank.py +++ b/api/core/model_runtime/model_providers/jina/rerank/rerank.py @@ -128,7 +128,7 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode label=I18nObject(en_US=model), model_type=ModelType.RERANK, fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_properties={ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size"))}, + model_properties={ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", 8000))}, ) return entity diff --git a/api/core/model_runtime/model_providers/jina/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/jina/text_embedding/text_embedding.py index 49c558f4a44ffa..f5be7a98289a3a 100644 --- a/api/core/model_runtime/model_providers/jina/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/jina/text_embedding/text_embedding.py @@ -193,7 +193,7 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode label=I18nObject(en_US=model), model_type=ModelType.TEXT_EMBEDDING, fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_properties={ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size"))}, + model_properties={ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", 8000))}, ) return entity diff --git a/api/core/model_runtime/model_providers/ollama/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/ollama/text_embedding/text_embedding.py index a16c91cd7ef81e..83c4facc8db76c 100644 --- a/api/core/model_runtime/model_providers/ollama/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/ollama/text_embedding/text_embedding.py @@ -139,7 +139,7 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode model_type=ModelType.TEXT_EMBEDDING, fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, model_properties={ - ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size")), + 
ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", 512)), ModelPropertyKey.MAX_CHUNKS: 1, }, parameter_rules=[], diff --git a/api/core/model_runtime/model_providers/openai/llm/llm.py b/api/core/model_runtime/model_providers/openai/llm/llm.py index aea884e0023d0c..07cb1e2d1018f9 100644 --- a/api/core/model_runtime/model_providers/openai/llm/llm.py +++ b/api/core/model_runtime/model_providers/openai/llm/llm.py @@ -943,6 +943,9 @@ def _convert_prompt_message_to_dict(self, message: PromptMessage) -> dict: } elif isinstance(message, SystemPromptMessage): message = cast(SystemPromptMessage, message) + if isinstance(message.content, list): + text_contents = filter(lambda c: isinstance(c, TextPromptMessageContent), message.content) + message.content = "".join(c.data for c in text_contents) message_dict = {"role": "system", "content": message.content} elif isinstance(message, ToolPromptMessage): message = cast(ToolPromptMessage, message) diff --git a/api/core/model_runtime/model_providers/openai/tts/tts.py b/api/core/model_runtime/model_providers/openai/tts/tts.py index 2e57b95944c6b4..dac37f0c7f5176 100644 --- a/api/core/model_runtime/model_providers/openai/tts/tts.py +++ b/api/core/model_runtime/model_providers/openai/tts/tts.py @@ -11,7 +11,7 @@ class OpenAIText2SpeechModel(_CommonOpenAI, TTSModel): """ - Model class for OpenAI Speech to text model. + Model class for OpenAI text2speech model. """ def _invoke( diff --git a/api/core/model_runtime/model_providers/openai_api_compatible/openai_api_compatible.yaml b/api/core/model_runtime/model_providers/openai_api_compatible/openai_api_compatible.yaml index 69a081f35c10a6..2b8dcb72d8b0f6 100644 --- a/api/core/model_runtime/model_providers/openai_api_compatible/openai_api_compatible.yaml +++ b/api/core/model_runtime/model_providers/openai_api_compatible/openai_api_compatible.yaml @@ -9,6 +9,7 @@ supported_model_types: - text-embedding - speech2text - rerank + - tts configurate_methods: - customizable-model model_credential_schema: @@ -67,7 +68,7 @@ model_credential_schema: - variable: __model_type value: llm type: text-input - default: '4096' + default: "4096" placeholder: zh_Hans: 在此输入您的模型上下文长度 en_US: Enter your Model context size @@ -80,7 +81,7 @@ model_credential_schema: - variable: __model_type value: text-embedding type: text-input - default: '4096' + default: "4096" placeholder: zh_Hans: 在此输入您的模型上下文长度 en_US: Enter your Model context size @@ -93,7 +94,7 @@ model_credential_schema: - variable: __model_type value: rerank type: text-input - default: '4096' + default: "4096" placeholder: zh_Hans: 在此输入您的模型上下文长度 en_US: Enter your Model context size @@ -104,7 +105,7 @@ model_credential_schema: show_on: - variable: __model_type value: llm - default: '4096' + default: "4096" type: text-input - variable: function_calling_type show_on: @@ -174,3 +175,19 @@ model_credential_schema: value: llm default: '\n\n' type: text-input + - variable: voices + show_on: + - variable: __model_type + value: tts + label: + en_US: Available Voices (comma-separated) + zh_Hans: 可用声音(用英文逗号分隔) + type: text-input + required: false + default: "alloy" + placeholder: + en_US: "alloy,echo,fable,onyx,nova,shimmer" + zh_Hans: "alloy,echo,fable,onyx,nova,shimmer" + help: + en_US: "List voice names separated by commas. First voice will be used as default." 
+ zh_Hans: "用英文逗号分隔的声音列表。第一个声音将作为默认值。" diff --git a/api/core/model_runtime/model_providers/openai_api_compatible/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/openai_api_compatible/text_embedding/text_embedding.py index c2b7297aac596e..793c384d5a8079 100644 --- a/api/core/model_runtime/model_providers/openai_api_compatible/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/openai_api_compatible/text_embedding/text_embedding.py @@ -176,7 +176,7 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode model_type=ModelType.TEXT_EMBEDDING, fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, model_properties={ - ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size")), + ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", 512)), ModelPropertyKey.MAX_CHUNKS: 1, }, parameter_rules=[], diff --git a/api/core/model_runtime/model_providers/openai_api_compatible/tts/__init__.py b/api/core/model_runtime/model_providers/openai_api_compatible/tts/__init__.py new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/api/core/model_runtime/model_providers/openai_api_compatible/tts/tts.py b/api/core/model_runtime/model_providers/openai_api_compatible/tts/tts.py new file mode 100644 index 00000000000000..8239c625f7ada8 --- /dev/null +++ b/api/core/model_runtime/model_providers/openai_api_compatible/tts/tts.py @@ -0,0 +1,145 @@ +from collections.abc import Iterable +from typing import Optional +from urllib.parse import urljoin + +import requests + +from core.model_runtime.entities.common_entities import I18nObject +from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelPropertyKey, ModelType +from core.model_runtime.errors.invoke import InvokeBadRequestError +from core.model_runtime.errors.validate import CredentialsValidateFailedError +from core.model_runtime.model_providers.__base.tts_model import TTSModel +from core.model_runtime.model_providers.openai_api_compatible._common import _CommonOaiApiCompat + + +class OAICompatText2SpeechModel(_CommonOaiApiCompat, TTSModel): + """ + Model class for OpenAI-compatible text2speech model. 
+ """ + + def _invoke( + self, + model: str, + tenant_id: str, + credentials: dict, + content_text: str, + voice: str, + user: Optional[str] = None, + ) -> Iterable[bytes]: + """ + Invoke TTS model + + :param model: model name + :param tenant_id: user tenant id + :param credentials: model credentials + :param content_text: text content to be translated + :param voice: model voice/speaker + :param user: unique user id + :return: audio data as bytes iterator + """ + # Set up headers with authentication if provided + headers = {} + if api_key := credentials.get("api_key"): + headers["Authorization"] = f"Bearer {api_key}" + + # Construct endpoint URL + endpoint_url = credentials.get("endpoint_url") + if not endpoint_url.endswith("/"): + endpoint_url += "/" + endpoint_url = urljoin(endpoint_url, "audio/speech") + + # Get audio format from model properties + audio_format = self._get_model_audio_type(model, credentials) + + # Split text into chunks if needed based on word limit + word_limit = self._get_model_word_limit(model, credentials) + sentences = self._split_text_into_sentences(content_text, word_limit) + + for sentence in sentences: + # Prepare request payload + payload = {"model": model, "input": sentence, "voice": voice, "response_format": audio_format} + + # Make POST request + response = requests.post(endpoint_url, headers=headers, json=payload, stream=True) + + if response.status_code != 200: + raise InvokeBadRequestError(response.text) + + # Stream the audio data + for chunk in response.iter_content(chunk_size=4096): + if chunk: + yield chunk + + def validate_credentials(self, model: str, credentials: dict) -> None: + """ + Validate model credentials + + :param model: model name + :param credentials: model credentials + :return: + """ + try: + # Get default voice for validation + voice = self._get_model_default_voice(model, credentials) + + # Test with a simple text + next( + self._invoke( + model=model, tenant_id="validate", credentials=credentials, content_text="Test.", voice=voice + ) + ) + except Exception as ex: + raise CredentialsValidateFailedError(str(ex)) + + def get_customizable_model_schema(self, model: str, credentials: dict) -> Optional[AIModelEntity]: + """ + Get customizable model schema + """ + # Parse voices from comma-separated string + voice_names = credentials.get("voices", "alloy").strip().split(",") + voices = [] + + for voice in voice_names: + voice = voice.strip() + if not voice: + continue + + # Use en-US for all voices + voices.append( + { + "name": voice, + "mode": voice, + "language": "en-US", + } + ) + + # If no voices provided or all voices were empty strings, use 'alloy' as default + if not voices: + voices = [{"name": "Alloy", "mode": "alloy", "language": "en-US"}] + + return AIModelEntity( + model=model, + label=I18nObject(en_US=model), + fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, + model_type=ModelType.TTS, + model_properties={ + ModelPropertyKey.AUDIO_TYPE: credentials.get("audio_type", "mp3"), + ModelPropertyKey.WORD_LIMIT: int(credentials.get("word_limit", 4096)), + ModelPropertyKey.DEFAULT_VOICE: voices[0]["mode"], + ModelPropertyKey.VOICES: voices, + }, + ) + + def get_tts_model_voices(self, model: str, credentials: dict, language: Optional[str] = None) -> list: + """ + Override base get_tts_model_voices to handle customizable voices + """ + model_schema = self.get_customizable_model_schema(model, credentials) + + if not model_schema or ModelPropertyKey.VOICES not in model_schema.model_properties: + raise ValueError("this model does not support 
voice") + + voices = model_schema.model_properties[ModelPropertyKey.VOICES] + + # Always return all voices regardless of language + return [{"name": d["name"], "value": d["mode"]} for d in voices] diff --git a/api/core/model_runtime/model_providers/perfxcloud/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/perfxcloud/text_embedding/text_embedding.py index d78bdaa75e5423..7bbd31e87c595d 100644 --- a/api/core/model_runtime/model_providers/perfxcloud/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/perfxcloud/text_embedding/text_embedding.py @@ -182,7 +182,7 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode model_type=ModelType.TEXT_EMBEDDING, fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, model_properties={ - ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size")), + ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", 512)), ModelPropertyKey.MAX_CHUNKS: 1, }, parameter_rules=[], diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/_position.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/_position.yaml index f010e4c8265d47..b52df3e4e3fdee 100644 --- a/api/core/model_runtime/model_providers/siliconflow/llm/_position.yaml +++ b/api/core/model_runtime/model_providers/siliconflow/llm/_position.yaml @@ -24,4 +24,3 @@ - meta-llama/Meta-Llama-3.1-8B-Instruct - google/gemma-2-27b-it - google/gemma-2-9b-it -- deepseek-ai/DeepSeek-V2-Chat diff --git a/api/core/model_runtime/model_providers/siliconflow/siliconflow.yaml b/api/core/model_runtime/model_providers/siliconflow/siliconflow.yaml index 71f9a9238145c0..73a9e80769b540 100644 --- a/api/core/model_runtime/model_providers/siliconflow/siliconflow.yaml +++ b/api/core/model_runtime/model_providers/siliconflow/siliconflow.yaml @@ -18,6 +18,7 @@ supported_model_types: - text-embedding - rerank - speech2text + - tts configurate_methods: - predefined-model - customizable-model diff --git a/api/core/model_runtime/model_providers/siliconflow/tts/__init__.py b/api/core/model_runtime/model_providers/siliconflow/tts/__init__.py new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/api/core/model_runtime/model_providers/siliconflow/tts/fish-speech-1.4.yaml b/api/core/model_runtime/model_providers/siliconflow/tts/fish-speech-1.4.yaml new file mode 100644 index 00000000000000..4adfd05c601850 --- /dev/null +++ b/api/core/model_runtime/model_providers/siliconflow/tts/fish-speech-1.4.yaml @@ -0,0 +1,37 @@ +model: fishaudio/fish-speech-1.4 +model_type: tts +model_properties: + default_voice: 'fishaudio/fish-speech-1.4:alex' + voices: + - mode: "fishaudio/fish-speech-1.4:alex" + name: "Alex(男声)" + language: [ "zh-Hans", "en-US" ] + - mode: "fishaudio/fish-speech-1.4:benjamin" + name: "Benjamin(男声)" + language: [ "zh-Hans", "en-US" ] + - mode: "fishaudio/fish-speech-1.4:charles" + name: "Charles(男声)" + language: [ "zh-Hans", "en-US" ] + - mode: "fishaudio/fish-speech-1.4:david" + name: "David(男声)" + language: [ "zh-Hans", "en-US" ] + - mode: "fishaudio/fish-speech-1.4:anna" + name: "Anna(女声)" + language: [ "zh-Hans", "en-US" ] + - mode: "fishaudio/fish-speech-1.4:bella" + name: "Bella(女声)" + language: [ "zh-Hans", "en-US" ] + - mode: "fishaudio/fish-speech-1.4:claire" + name: "Claire(女声)" + language: [ "zh-Hans", "en-US" ] + - mode: "fishaudio/fish-speech-1.4:diana" + name: "Diana(女声)" + language: [ "zh-Hans", "en-US" ] + audio_type: 'mp3' + max_workers: 5 + # stream: false +pricing: + input: '0.015' + output: '0' + 
unit: '0.001' + currency: RMB diff --git a/api/core/model_runtime/model_providers/siliconflow/tts/tts.py b/api/core/model_runtime/model_providers/siliconflow/tts/tts.py new file mode 100644 index 00000000000000..a5554abb73eff3 --- /dev/null +++ b/api/core/model_runtime/model_providers/siliconflow/tts/tts.py @@ -0,0 +1,105 @@ +import concurrent.futures +from typing import Any, Optional + +from openai import OpenAI + +from core.model_runtime.errors.invoke import InvokeBadRequestError +from core.model_runtime.errors.validate import CredentialsValidateFailedError +from core.model_runtime.model_providers.__base.tts_model import TTSModel +from core.model_runtime.model_providers.openai._common import _CommonOpenAI + + +class SiliconFlowText2SpeechModel(_CommonOpenAI, TTSModel): + """ + Model class for SiliconFlow text2speech model. + """ + + def _invoke( + self, model: str, tenant_id: str, credentials: dict, content_text: str, voice: str, user: Optional[str] = None + ) -> Any: + """ + _invoke text2speech model + + :param model: model name + :param tenant_id: user tenant id + :param credentials: model credentials + :param content_text: text content to be translated + :param voice: model timbre + :param user: unique user id + :return: text translated to audio file + """ + if not voice or voice not in [ + d["value"] for d in self.get_tts_model_voices(model=model, credentials=credentials) + ]: + voice = self._get_model_default_voice(model, credentials) + # if streaming: + return self._tts_invoke_streaming(model=model, credentials=credentials, content_text=content_text, voice=voice) + + def validate_credentials(self, model: str, credentials: dict, user: Optional[str] = None) -> None: + """ + validate credentials text2speech model + + :param model: model name + :param credentials: model credentials + :param user: unique user id + :return: + """ + try: + self._tts_invoke_streaming( + model=model, + credentials=credentials, + content_text="Hello SiliconFlow!", + voice=self._get_model_default_voice(model, credentials), + ) + except Exception as ex: + raise CredentialsValidateFailedError(str(ex)) + + def _tts_invoke_streaming(self, model: str, credentials: dict, content_text: str, voice: str) -> Any: + """ + _tts_invoke_streaming text2speech model + + :param model: model name + :param credentials: model credentials + :param content_text: text content to be translated + :param voice: model timbre + :return: text translated to audio file + """ + try: + # doc: https://docs.siliconflow.cn/capabilities/text-to-speech + self._add_custom_parameters(credentials) + credentials_kwargs = self._to_credential_kwargs(credentials) + client = OpenAI(**credentials_kwargs) + model_support_voice = [ + x.get("value") for x in self.get_tts_model_voices(model=model, credentials=credentials) + ] + if not voice or voice not in model_support_voice: + voice = self._get_model_default_voice(model, credentials) + if len(content_text) > 4096: + sentences = self._split_text_into_sentences(content_text, max_length=4096) + executor = concurrent.futures.ThreadPoolExecutor(max_workers=min(3, len(sentences))) + futures = [ + executor.submit( + client.audio.speech.with_streaming_response.create, + model=model, + response_format="mp3", + input=sentences[i], + voice=voice, + ) + for i in range(len(sentences)) + ] + for future in futures: + yield from future.result().__enter__().iter_bytes(1024) # noqa:PLC2801 + + else: + response = client.audio.speech.with_streaming_response.create( + model=model, voice=voice,
response_format="mp3", input=content_text.strip() + ) + + yield from response.__enter__().iter_bytes(1024) # noqa:PLC2801 + except Exception as ex: + raise InvokeBadRequestError(str(ex)) + + @classmethod + def _add_custom_parameters(cls, credentials: dict) -> None: + credentials["openai_api_base"] = "https://api.siliconflow.cn" + credentials["openai_api_key"] = credentials["api_key"] diff --git a/api/core/model_runtime/model_providers/vertex_ai/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/vertex_ai/text_embedding/text_embedding.py index 43233e61262264..9cd0c78d99df24 100644 --- a/api/core/model_runtime/model_providers/vertex_ai/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/vertex_ai/text_embedding/text_embedding.py @@ -173,7 +173,7 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode model_type=ModelType.TEXT_EMBEDDING, fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, model_properties={ - ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size")), + ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", 512)), ModelPropertyKey.MAX_CHUNKS: 1, }, parameter_rules=[], diff --git a/api/core/model_runtime/model_providers/voyage/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/voyage/text_embedding/text_embedding.py index e69c9fccba97ed..16f1bd43d8820d 100644 --- a/api/core/model_runtime/model_providers/voyage/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/voyage/text_embedding/text_embedding.py @@ -166,7 +166,7 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode label=I18nObject(en_US=model), model_type=ModelType.TEXT_EMBEDDING, fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_properties={ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size"))}, + model_properties={ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", 512))}, ) return entity diff --git a/api/core/model_runtime/model_providers/x/llm/grok-beta.yaml b/api/core/model_runtime/model_providers/x/llm/grok-beta.yaml index 7c305735b99e33..bb71de2badb335 100644 --- a/api/core/model_runtime/model_providers/x/llm/grok-beta.yaml +++ b/api/core/model_runtime/model_providers/x/llm/grok-beta.yaml @@ -1,9 +1,12 @@ model: grok-beta label: - en_US: Grok beta + en_US: Grok Beta model_type: llm features: + - agent-thought + - tool-call - multi-tool-call + - stream-tool-call model_properties: mode: chat context_size: 131072 diff --git a/api/core/model_runtime/model_providers/x/llm/grok-vision-beta.yaml b/api/core/model_runtime/model_providers/x/llm/grok-vision-beta.yaml new file mode 100644 index 00000000000000..844f0520bc64fb --- /dev/null +++ b/api/core/model_runtime/model_providers/x/llm/grok-vision-beta.yaml @@ -0,0 +1,64 @@ +model: grok-vision-beta +label: + en_US: Grok Vision Beta +model_type: llm +features: + - agent-thought + - vision +model_properties: + mode: chat + context_size: 8192 +parameter_rules: + - name: temperature + label: + en_US: "Temperature" + zh_Hans: "采样温度" + type: float + default: 0.7 + min: 0.0 + max: 2.0 + precision: 1 + required: true + help: + en_US: "The randomness of the sampling temperature control output. The temperature value is within the range of [0.0, 1.0]. The higher the value, the more random and creative the output; the lower the value, the more stable it is. It is recommended to adjust either top_p or temperature parameters according to your needs to avoid adjusting both at the same time." 
+ zh_Hans: "采样温度控制输出的随机性。温度值在 [0.0, 1.0] 范围内,值越高,输出越随机和创造性;值越低,输出越稳定。建议根据需求调整 top_p 或 temperature 参数,避免同时调整两者。" + + - name: top_p + label: + en_US: "Top P" + zh_Hans: "Top P" + type: float + default: 0.7 + min: 0.0 + max: 1.0 + precision: 1 + required: true + help: + en_US: "The value range of the sampling method is [0.0, 1.0]. The top_p value determines that the model selects tokens from the top p% of candidate words with the highest probability; when top_p is 0, this parameter is invalid. It is recommended to adjust either top_p or temperature parameters according to your needs to avoid adjusting both at the same time." + zh_Hans: "采样方法的取值范围为 [0.0,1.0]。top_p 值确定模型从概率最高的前p%的候选词中选取 tokens;当 top_p 为 0 时,此参数无效。建议根据需求调整 top_p 或 temperature 参数,避免同时调整两者。" + + - name: frequency_penalty + use_template: frequency_penalty + label: + en_US: "Frequency Penalty" + zh_Hans: "频率惩罚" + type: float + default: 0 + min: 0 + max: 2.0 + precision: 1 + required: false + help: + en_US: "Number between 0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim." + zh_Hans: "介于0和2.0之间的数字。正值会根据新标记在文本中迄今为止的现有频率来惩罚它们,从而降低模型一字不差地重复同一句话的可能性。" + + - name: user + use_template: text + label: + en_US: "User" + zh_Hans: "用户" + type: string + required: false + help: + en_US: "Used to track and differentiate conversation requests from different users." + zh_Hans: "用于追踪和区分不同用户的对话请求。" diff --git a/api/core/model_runtime/model_providers/x/llm/llm.py b/api/core/model_runtime/model_providers/x/llm/llm.py index 3f5325a857dc92..eacd086fee9d8d 100644 --- a/api/core/model_runtime/model_providers/x/llm/llm.py +++ b/api/core/model_runtime/model_providers/x/llm/llm.py @@ -35,3 +35,5 @@ def _add_custom_parameters(credentials) -> None: credentials["endpoint_url"] = str(URL(credentials["endpoint_url"])) or "https://api.x.ai/v1" credentials["mode"] = LLMMode.CHAT.value credentials["function_calling_type"] = "tool_call" + credentials["stream_function_calling"] = "support" + credentials["vision_support"] = "support" diff --git a/api/core/model_runtime/model_providers/zhipuai/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/zhipuai/text_embedding/text_embedding.py index f629b62fd5385b..2428284ba9a8ff 100644 --- a/api/core/model_runtime/model_providers/zhipuai/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/zhipuai/text_embedding/text_embedding.py @@ -105,17 +105,6 @@ def embed_documents(self, model: str, client: ZhipuAI, texts: list[str]) -> tupl return [list(map(float, e)) for e in embeddings], embedding_used_tokens - def embed_query(self, text: str) -> list[float]: - """Call out to ZhipuAI's embedding endpoint. - - Args: - text: The text to embed. - - Returns: - Embeddings for the text. 
- """ - return self.embed_documents([text])[0] - def _calc_response_usage(self, model: str, credentials: dict, tokens: int) -> EmbeddingUsage: """ Calculate response usage diff --git a/api/core/ops/ops_trace_manager.py b/api/core/ops/ops_trace_manager.py index 1069889abda965..b7799ce1fbdd5e 100644 --- a/api/core/ops/ops_trace_manager.py +++ b/api/core/ops/ops_trace_manager.py @@ -445,7 +445,7 @@ def message_trace(self, message_id): "ls_provider": message_data.model_provider, "ls_model_name": message_data.model_id, "status": message_data.status, - "from_end_user_id": message_data.from_account_id, + "from_end_user_id": message_data.from_end_user_id, "from_account_id": message_data.from_account_id, "agent_based": message_data.agent_based, "workflow_run_id": message_data.workflow_run_id, @@ -521,7 +521,7 @@ def suggested_question_trace(self, message_id, timer, **kwargs): "ls_provider": message_data.model_provider, "ls_model_name": message_data.model_id, "status": message_data.status, - "from_end_user_id": message_data.from_account_id, + "from_end_user_id": message_data.from_end_user_id, "from_account_id": message_data.from_account_id, "agent_based": message_data.agent_based, "workflow_run_id": message_data.workflow_run_id, @@ -570,7 +570,7 @@ def dataset_retrieval_trace(self, message_id, timer, **kwargs): "ls_provider": message_data.model_provider, "ls_model_name": message_data.model_id, "status": message_data.status, - "from_end_user_id": message_data.from_account_id, + "from_end_user_id": message_data.from_end_user_id, "from_account_id": message_data.from_account_id, "agent_based": message_data.agent_based, "workflow_run_id": message_data.workflow_run_id, diff --git a/api/core/tools/provider/builtin/chart/chart.py b/api/core/tools/provider/builtin/chart/chart.py index dfa3fbea6aaeb9..8fa647d9ed8138 100644 --- a/api/core/tools/provider/builtin/chart/chart.py +++ b/api/core/tools/provider/builtin/chart/chart.py @@ -1,3 +1,4 @@ +import matplotlib import matplotlib.pyplot as plt from matplotlib.font_manager import FontProperties, fontManager @@ -5,7 +6,7 @@ def set_chinese_font(): - font_list = [ + to_find_fonts = [ "PingFang SC", "SimHei", "Microsoft YaHei", @@ -15,16 +16,16 @@ def set_chinese_font(): "Noto Sans CJK SC", "Noto Sans CJK JP", ] - - for font in font_list: - if font in fontManager.ttflist: - chinese_font = FontProperties(font) - if chinese_font.get_name() == font: - return chinese_font + installed_fonts = frozenset(fontInfo.name for fontInfo in fontManager.ttflist) + for font in to_find_fonts: + if font in installed_fonts: + return FontProperties(font) return FontProperties() +# use non-interactive backend to prevent `RuntimeError: main thread is not in main loop` +matplotlib.use("Agg") # use a business theme plt.style.use("seaborn-v0_8-darkgrid") plt.rcParams["axes.unicode_minus"] = False diff --git a/api/core/tools/provider/builtin/duckduckgo/tools/ddgo_img.py b/api/core/tools/provider/builtin/duckduckgo/tools/ddgo_img.py index 54bb38755a5b5c..b3c630878f3c62 100644 --- a/api/core/tools/provider/builtin/duckduckgo/tools/ddgo_img.py +++ b/api/core/tools/provider/builtin/duckduckgo/tools/ddgo_img.py @@ -18,6 +18,12 @@ def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> list[ToolInv "size": tool_parameters.get("size"), "max_results": tool_parameters.get("max_results"), } + + # Add query_prefix handling + query_prefix = tool_parameters.get("query_prefix", "").strip() + final_query = f"{query_prefix} {query_dict['keywords']}".strip() + query_dict["keywords"] = 
final_query + response = DDGS().images(**query_dict) markdown_result = "\n\n" json_result = [] diff --git a/api/core/tools/provider/builtin/duckduckgo/tools/ddgo_img.yaml b/api/core/tools/provider/builtin/duckduckgo/tools/ddgo_img.yaml index 168cface224e40..a543d1e218b578 100644 --- a/api/core/tools/provider/builtin/duckduckgo/tools/ddgo_img.yaml +++ b/api/core/tools/provider/builtin/duckduckgo/tools/ddgo_img.yaml @@ -86,3 +86,14 @@ parameters: en_US: The size of the image to be searched. zh_Hans: 要搜索的图片的大小 form: form + - name: query_prefix + label: + en_US: Query Prefix + zh_Hans: 查询前缀 + type: string + required: false + default: "" + form: form + human_description: + en_US: Specific Search e.g. "site:unsplash.com" + zh_Hans: 定向搜索 e.g. "site:unsplash.com" diff --git a/api/core/tools/provider/builtin/duckduckgo/tools/ddgo_news.py b/api/core/tools/provider/builtin/duckduckgo/tools/ddgo_news.py index 3a6fd394a8f4ec..11da6f5cf76580 100644 --- a/api/core/tools/provider/builtin/duckduckgo/tools/ddgo_news.py +++ b/api/core/tools/provider/builtin/duckduckgo/tools/ddgo_news.py @@ -7,7 +7,7 @@ from core.tools.tool.builtin_tool import BuiltinTool SUMMARY_PROMPT = """ -User's query: +User's query: {query} Here are the news results: @@ -30,6 +30,12 @@ def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMe "safesearch": "moderate", "region": "wt-wt", } + + # Add query_prefix handling + query_prefix = tool_parameters.get("query_prefix", "").strip() + final_query = f"{query_prefix} {query_dict['keywords']}".strip() + query_dict["keywords"] = final_query + try: response = list(DDGS().news(**query_dict)) if not response: diff --git a/api/core/tools/provider/builtin/duckduckgo/tools/ddgo_news.yaml b/api/core/tools/provider/builtin/duckduckgo/tools/ddgo_news.yaml index eb2b67b7c9d832..6e181e0f41c22f 100644 --- a/api/core/tools/provider/builtin/duckduckgo/tools/ddgo_news.yaml +++ b/api/core/tools/provider/builtin/duckduckgo/tools/ddgo_news.yaml @@ -69,3 +69,14 @@ parameters: en_US: Whether to pass the news results to llm for summarization. zh_Hans: 是否需要将新闻结果传给大模型总结 form: form + - name: query_prefix + label: + en_US: Query Prefix + zh_Hans: 查询前缀 + type: string + required: false + default: "" + form: form + human_description: + en_US: Specific Search e.g. "site:msn.com" + zh_Hans: 定向搜索 e.g. 
"site:msn.com" diff --git a/api/core/tools/provider/builtin/duckduckgo/tools/ddgo_search.py b/api/core/tools/provider/builtin/duckduckgo/tools/ddgo_search.py index cbd65d2e7756e0..3cd35d16a6f460 100644 --- a/api/core/tools/provider/builtin/duckduckgo/tools/ddgo_search.py +++ b/api/core/tools/provider/builtin/duckduckgo/tools/ddgo_search.py @@ -7,7 +7,7 @@ from core.tools.tool.builtin_tool import BuiltinTool SUMMARY_PROMPT = """ -User's query: +User's query: {query} Here is the search engine result: @@ -26,7 +26,12 @@ def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMe query = tool_parameters.get("query") max_results = tool_parameters.get("max_results", 5) require_summary = tool_parameters.get("require_summary", False) - response = DDGS().text(query, max_results=max_results) + + # Add query_prefix handling + query_prefix = tool_parameters.get("query_prefix", "").strip() + final_query = f"{query_prefix} {query}".strip() + + response = DDGS().text(final_query, max_results=max_results) if require_summary: results = "\n".join([res.get("body") for res in response]) results = self.summary_results(user_id=user_id, content=results, query=query) diff --git a/api/core/tools/provider/builtin/duckduckgo/tools/ddgo_search.yaml b/api/core/tools/provider/builtin/duckduckgo/tools/ddgo_search.yaml index 333c0cb093dbd2..54e27d9905da12 100644 --- a/api/core/tools/provider/builtin/duckduckgo/tools/ddgo_search.yaml +++ b/api/core/tools/provider/builtin/duckduckgo/tools/ddgo_search.yaml @@ -39,3 +39,14 @@ parameters: en_US: Whether to pass the search results to llm for summarization. zh_Hans: 是否需要将搜索结果传给大模型总结 form: form + - name: query_prefix + label: + en_US: Query Prefix + zh_Hans: 查询前缀 + type: string + required: false + default: "" + form: form + human_description: + en_US: Specific Search e.g. "site:wikipedia.org" + zh_Hans: 定向搜索 e.g. 
"site:wikipedia.org" diff --git a/api/core/tools/provider/builtin/duckduckgo/tools/ddgo_video.py b/api/core/tools/provider/builtin/duckduckgo/tools/ddgo_video.py index 4b74b223c1229c..1eef0b1ba23d42 100644 --- a/api/core/tools/provider/builtin/duckduckgo/tools/ddgo_video.py +++ b/api/core/tools/provider/builtin/duckduckgo/tools/ddgo_video.py @@ -24,7 +24,7 @@ class DuckDuckGoVideoSearchTool(BuiltinTool): def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> list[ToolInvokeMessage]: query_dict = { - "keywords": tool_parameters.get("query"), + "keywords": tool_parameters.get("query"), # LLM's query "region": tool_parameters.get("region", "wt-wt"), "safesearch": tool_parameters.get("safesearch", "moderate"), "timelimit": tool_parameters.get("timelimit"), @@ -40,6 +40,12 @@ def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> list[ToolInv # Get proxy URL from parameters proxy_url = tool_parameters.get("proxy_url", "").strip() + query_prefix = tool_parameters.get("query_prefix", "").strip() + final_query = f"{query_prefix} {query_dict['keywords']}".strip() + + # Update the keywords in query_dict with the final_query + query_dict["keywords"] = final_query + response = DDGS().videos(**query_dict) # Create HTML result with embedded iframes @@ -51,9 +57,13 @@ def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> list[ToolInv embed_html = res.get("embed_html", "") description = res.get("description", "") content_url = res.get("content", "") + transcript_url = None # Handle TED.com videos - if not embed_html and "ted.com/talks" in content_url: + if "ted.com/talks" in content_url: + # Create transcript URL + transcript_url = f"{content_url}/transcript" + # Create embed URL embed_url = content_url.replace("www.ted.com", "embed.ted.com") if proxy_url: embed_url = f"{proxy_url}{embed_url}" @@ -68,8 +78,14 @@ def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> list[ToolInv markdown_result += f"{title}\n\n" markdown_result += f"{embed_html}\n\n" + if description: + markdown_result += f"{description}\n\n" markdown_result += "---\n\n" - json_result.append(self.create_json_message(res)) + # Add transcript_url to the JSON result if available + result_dict = res.copy() + if transcript_url: + result_dict["transcript_url"] = transcript_url + json_result.append(self.create_json_message(result_dict)) return [self.create_text_message(markdown_result)] + json_result diff --git a/api/core/tools/provider/builtin/duckduckgo/tools/ddgo_video.yaml b/api/core/tools/provider/builtin/duckduckgo/tools/ddgo_video.yaml index a516d3cb98b3fb..d846244e3dfcbd 100644 --- a/api/core/tools/provider/builtin/duckduckgo/tools/ddgo_video.yaml +++ b/api/core/tools/provider/builtin/duckduckgo/tools/ddgo_video.yaml @@ -95,3 +95,14 @@ parameters: en_US: Proxy URL zh_Hans: 视频代理地址 form: form + - name: query_prefix + label: + en_US: Query Prefix + zh_Hans: 查询前缀 + type: string + required: false + default: "" + form: form + human_description: + en_US: Specific Search e.g. "site:www.ted.com" + zh_Hans: 定向搜索 e.g. 
"site:www.ted.com" diff --git a/api/core/tools/provider/builtin/searchapi/tools/google.py b/api/core/tools/provider/builtin/searchapi/tools/google.py index 17e2978194c6a3..29d36f5f232694 100644 --- a/api/core/tools/provider/builtin/searchapi/tools/google.py +++ b/api/core/tools/provider/builtin/searchapi/tools/google.py @@ -45,7 +45,7 @@ def get_params(self, query: str, **kwargs: Any) -> dict[str, str]: def _process_response(res: dict, type: str) -> str: """Process response from SearchAPI.""" if "error" in res: - raise ValueError(f"Got error from SearchApi: {res['error']}") + return res["error"] toret = "" if type == "text": diff --git a/api/core/tools/provider/builtin/searchapi/tools/google_jobs.py b/api/core/tools/provider/builtin/searchapi/tools/google_jobs.py index c478bc108b47e1..de42360898b7e0 100644 --- a/api/core/tools/provider/builtin/searchapi/tools/google_jobs.py +++ b/api/core/tools/provider/builtin/searchapi/tools/google_jobs.py @@ -45,7 +45,7 @@ def get_params(self, query: str, **kwargs: Any) -> dict[str, str]: def _process_response(res: dict, type: str) -> str: """Process response from SearchAPI.""" if "error" in res: - raise ValueError(f"Got error from SearchApi: {res['error']}") + return res["error"] toret = "" if type == "text": diff --git a/api/core/tools/provider/builtin/searchapi/tools/google_news.py b/api/core/tools/provider/builtin/searchapi/tools/google_news.py index 562bc01964b4c3..c8b3ccda05e195 100644 --- a/api/core/tools/provider/builtin/searchapi/tools/google_news.py +++ b/api/core/tools/provider/builtin/searchapi/tools/google_news.py @@ -45,7 +45,7 @@ def get_params(self, query: str, **kwargs: Any) -> dict[str, str]: def _process_response(res: dict, type: str) -> str: """Process response from SearchAPI.""" if "error" in res: - raise ValueError(f"Got error from SearchApi: {res['error']}") + return res["error"] toret = "" if type == "text": diff --git a/api/core/tools/provider/builtin/searchapi/tools/youtube_transcripts.py b/api/core/tools/provider/builtin/searchapi/tools/youtube_transcripts.py index 1867cf7be79be5..b14821f8312dd0 100644 --- a/api/core/tools/provider/builtin/searchapi/tools/youtube_transcripts.py +++ b/api/core/tools/provider/builtin/searchapi/tools/youtube_transcripts.py @@ -45,7 +45,7 @@ def get_params(self, video_id: str, language: str, **kwargs: Any) -> dict[str, s def _process_response(res: dict) -> str: """Process response from SearchAPI.""" if "error" in res: - raise ValueError(f"Got error from SearchApi: {res['error']}") + return res["error"] toret = "" if "transcripts" in res and "text" in res["transcripts"][0]: diff --git a/api/core/tools/provider/builtin/slidespeak/tools/slides_generator.py b/api/core/tools/provider/builtin/slidespeak/tools/slides_generator.py index 74742bf4b70a03..aa4ee63e9767c9 100644 --- a/api/core/tools/provider/builtin/slidespeak/tools/slides_generator.py +++ b/api/core/tools/provider/builtin/slidespeak/tools/slides_generator.py @@ -149,7 +149,7 @@ async def async_invoke(): presentation_bytes = await self._fetch_presentation(session, download_url) return [ - self.create_text_message("Presentation generated successfully"), + self.create_text_message(download_url), self.create_blob_message( blob=presentation_bytes, meta={"mime_type": "application/vnd.openxmlformats-officedocument.presentationml.presentation"}, diff --git a/api/core/tools/tool_engine.py b/api/core/tools/tool_engine.py index dbd97c8151e738..f92b43608ed935 100644 --- a/api/core/tools/tool_engine.py +++ b/api/core/tools/tool_engine.py @@ -61,7 +61,12 
@@ def agent_invoke(
            if parameters and len(parameters) == 1:
                tool_parameters = {parameters[0].name: tool_parameters}
            else:
-                raise ValueError(f"tool_parameters should be a dict, but got a string: {tool_parameters}")
+                try:
+                    tool_parameters = json.loads(tool_parameters)
+                except Exception:
+                    pass
+                if not isinstance(tool_parameters, dict):
+                    raise ValueError(f"tool_parameters should be a dict, but got a string: {tool_parameters}")

        # invoke the tool
        try:
diff --git a/api/core/workflow/entities/node_entities.py b/api/core/workflow/entities/node_entities.py
index 1ac64e94ef69ea..e174d3baa0c736 100644
--- a/api/core/workflow/entities/node_entities.py
+++ b/api/core/workflow/entities/node_entities.py
@@ -36,7 +36,7 @@ class NodeRunResult(BaseModel):
     inputs: Optional[Mapping[str, Any]] = None  # node inputs
     process_data: Optional[dict[str, Any]] = None  # process data
-    outputs: Optional[dict[str, Any]] = None  # node outputs
+    outputs: Optional[Mapping[str, Any]] = None  # node outputs
     metadata: Optional[dict[NodeRunMetadataKey, Any]] = None  # node metadata
     llm_usage: Optional[LLMUsage] = None  # llm usage
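The agent_invoke change above makes string tool arguments more forgiving: before rejecting a string outright, the engine now tries to parse it as a JSON object, which is what function-calling models frequently emit. A standalone sketch of that control flow (simplified; the `parameters` items are assumed to expose a `.name` attribute, as tool parameter entities do):

```python
import json

def coerce_tool_parameters(tool_parameters, parameters):
    """Sketch of the fallback added to agent_invoke (not the actual module)."""
    if isinstance(tool_parameters, str):
        # A single-parameter tool can take the raw string as that parameter.
        if parameters and len(parameters) == 1:
            return {parameters[0].name: tool_parameters}
        # Otherwise, try to interpret the string as a JSON object first.
        try:
            tool_parameters = json.loads(tool_parameters)
        except Exception:
            pass
        if not isinstance(tool_parameters, dict):
            raise ValueError(f"tool_parameters should be a dict, but got a string: {tool_parameters}")
    return tool_parameters

print(coerce_tool_parameters('{"query": "dify"}', parameters=[]))  # {'query': 'dify'}
```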
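The tool_node comprehension above now filters instead of mapping unknown message types to empty strings, which stops stray blank lines from appearing between joined results. A self-contained sketch with stand-in types (not the real Dify classes):

```python
from dataclasses import dataclass
from enum import Enum

class MessageType(Enum):
    TEXT = "text"
    LINK = "link"
    IMAGE = "image"

@dataclass
class ToolMessage:
    type: MessageType
    message: str

def extract_text(tool_response: list[ToolMessage]) -> str:
    # Only TEXT and LINK messages contribute; others are skipped entirely
    # rather than contributing an empty joined element.
    return "\n".join(
        f"{m.message}" if m.type == MessageType.TEXT else f"Link: {m.message}"
        for m in tool_response
        if m.type in {MessageType.TEXT, MessageType.LINK}
    )

msgs = [ToolMessage(MessageType.TEXT, "hello"),
        ToolMessage(MessageType.IMAGE, "ignored.png"),
        ToolMessage(MessageType.LINK, "https://example.com")]
print(extract_text(msgs))  # hello\nLink: https://example.com
```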
diff --git a/api/core/workflow/workflow_entry.py b/api/core/workflow/workflow_entry.py
index 84b251223f96f1..6f7b143ad6edab 100644
--- a/api/core/workflow/workflow_entry.py
+++ b/api/core/workflow/workflow_entry.py
@@ -5,10 +5,9 @@
 from typing import Any, Optional, cast

 from configs import dify_config
-from core.app.app_config.entities import FileUploadConfig
 from core.app.apps.base_app_queue_manager import GenerateTaskStoppedError
 from core.app.entities.app_invoke_entities import InvokeFrom
-from core.file.models import File, FileTransferMethod, ImageConfig
+from core.file.models import File
 from core.workflow.callbacks import WorkflowCallback
 from core.workflow.entities.variable_pool import VariablePool
 from core.workflow.errors import WorkflowNodeRunFailedError
@@ -18,9 +17,8 @@
 from core.workflow.graph_engine.entities.graph_runtime_state import GraphRuntimeState
 from core.workflow.graph_engine.graph_engine import GraphEngine
 from core.workflow.nodes import NodeType
-from core.workflow.nodes.base import BaseNode, BaseNodeData
+from core.workflow.nodes.base import BaseNode
 from core.workflow.nodes.event import NodeEvent
-from core.workflow.nodes.llm import LLMNodeData
 from core.workflow.nodes.node_mapping import node_type_classes_mapping
 from factories import file_factory
 from models.enums import UserFrom
@@ -115,7 +113,12 @@ def run(

     @classmethod
     def single_step_run(
-        cls, workflow: Workflow, node_id: str, user_id: str, user_inputs: dict
+        cls,
+        *,
+        workflow: Workflow,
+        node_id: str,
+        user_id: str,
+        user_inputs: dict,
     ) -> tuple[BaseNode, Generator[NodeEvent | InNodeEvent, None, None]]:
         """
         Single step run workflow node
@@ -135,13 +138,9 @@ def single_step_run(
             raise ValueError("nodes not found in workflow graph")

         # fetch node config from node id
-        node_config = None
-        for node in nodes:
-            if node.get("id") == node_id:
-                node_config = node
-                break
-
-        if not node_config:
+        try:
+            node_config = next(filter(lambda node: node["id"] == node_id, nodes))
+        except StopIteration:
             raise ValueError("node id not found in workflow graph")

         # Get node class
@@ -153,11 +152,7 @@ def single_step_run(
             raise ValueError(f"Node class not found for node type {node_type}")

         # init variable pool
-        variable_pool = VariablePool(
-            system_variables={},
-            user_inputs={},
-            environment_variables=workflow.environment_variables,
-        )
+        variable_pool = VariablePool(environment_variables=workflow.environment_variables)

         # init graph
         graph = Graph.init(graph_config=workflow.graph_dict)
@@ -183,28 +178,24 @@ def single_step_run(

         try:
             # variable selector to variable mapping
-            try:
-                variable_mapping = node_cls.extract_variable_selector_to_variable_mapping(
-                    graph_config=workflow.graph_dict, config=node_config
-                )
-            except NotImplementedError:
-                variable_mapping = {}
-
-            cls.mapping_user_inputs_to_variable_pool(
-                variable_mapping=variable_mapping,
-                user_inputs=user_inputs,
-                variable_pool=variable_pool,
-                tenant_id=workflow.tenant_id,
-                node_type=node_type,
-                node_data=node_instance.node_data,
+            variable_mapping = node_cls.extract_variable_selector_to_variable_mapping(
+                graph_config=workflow.graph_dict, config=node_config
             )
+        except NotImplementedError:
+            variable_mapping = {}
+        cls.mapping_user_inputs_to_variable_pool(
+            variable_mapping=variable_mapping,
+            user_inputs=user_inputs,
+            variable_pool=variable_pool,
+            tenant_id=workflow.tenant_id,
+        )
+        try:
             # run node
             generator = node_instance.run()
-
-            return node_instance, generator
         except Exception as e:
             raise WorkflowNodeRunFailedError(node_instance=node_instance, error=str(e))
+        return node_instance, generator

     @staticmethod
     def handle_special_values(value: Optional[Mapping[str, Any]]) -> Mapping[str, Any] | None:
@@ -231,12 +222,11 @@ def _handle_special_values(value: Any) -> Any:
     @classmethod
     def mapping_user_inputs_to_variable_pool(
         cls,
+        *,
         variable_mapping: Mapping[str, Sequence[str]],
         user_inputs: dict,
         variable_pool: VariablePool,
         tenant_id: str,
-        node_type: NodeType,
-        node_data: BaseNodeData,
     ) -> None:
         for node_variable, variable_selector in variable_mapping.items():
             # fetch node id and variable key from node_variable
@@ -254,40 +244,21 @@ def mapping_user_inputs_to_variable_pool(
             # fetch variable node id from variable selector
             variable_node_id = variable_selector[0]
             variable_key_list = variable_selector[1:]
-            variable_key_list = cast(list[str], variable_key_list)
+            variable_key_list = list(variable_key_list)

             # get input value
             input_value = user_inputs.get(node_variable)
             if not input_value:
                 input_value = user_inputs.get(node_variable_key)

-            # FIXME: temp fix for image type
-            if node_type == NodeType.LLM:
-                new_value = []
-                if isinstance(input_value, list):
-                    node_data = cast(LLMNodeData, node_data)
-
-                    detail = node_data.vision.configs.detail if node_data.vision.configs else None
-
-                    for item in input_value:
-                        if isinstance(item, dict) and "type" in item and item["type"] == "image":
-                            transfer_method = FileTransferMethod.value_of(item.get("transfer_method"))
-                            mapping = {
-                                "id": item.get("id"),
-                                "transfer_method": transfer_method,
-                                "upload_file_id": item.get("upload_file_id"),
-                                "url": item.get("url"),
-                            }
-                            config = FileUploadConfig(image_config=ImageConfig(detail=detail) if detail else None)
-                            file = file_factory.build_from_mapping(
-                                mapping=mapping,
-                                tenant_id=tenant_id,
-                                config=config,
-                            )
-                            new_value.append(file)
-
-                if new_value:
-                    input_value = new_value
+            if isinstance(input_value, dict) and "type" in input_value and "transfer_method" in input_value:
+                input_value = file_factory.build_from_mapping(mapping=input_value, tenant_id=tenant_id)
+            if (
+                isinstance(input_value, list)
+                and all(isinstance(item, dict) for item in input_value)
+                and all("type" in item and "transfer_method" in item for item in input_value)
+            ):
+                input_value = file_factory.build_from_mappings(mappings=input_value, tenant_id=tenant_id)

             # append variable and value to variable pool
             variable_pool.add([variable_node_id] + variable_key_list, input_value)
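The rewritten mapping logic above generalizes file handling for single-step debug runs: instead of special-casing images on LLM nodes, any input shaped like a file mapping is routed through file_factory. A minimal sketch of the shape test (hypothetical values):

```python
def looks_like_file_mapping(value) -> bool:
    # Sketch of the new detection rule in mapping_user_inputs_to_variable_pool:
    # any dict carrying "type" and "transfer_method" is treated as a file
    # mapping, and a homogeneous list of such dicts as a batch of files.
    return isinstance(value, dict) and "type" in value and "transfer_method" in value

single = {"type": "document", "transfer_method": "local_file", "upload_file_id": "abc"}  # hypothetical
batch = [single, {"type": "image", "transfer_method": "remote_url", "url": "https://example.com/a.png"}]

print(looks_like_file_mapping(single))                 # True  -> build_from_mapping(...)
print(all(looks_like_file_mapping(v) for v in batch))  # True  -> build_from_mappings(...)
```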
diff --git a/api/factories/file_factory.py b/api/factories/file_factory.py
index 1c368a22cac4db..ad8dba81905362 100644
--- a/api/factories/file_factory.py
+++ b/api/factories/file_factory.py
@@ -86,12 +86,9 @@ def build_from_mapping(
 def build_from_mappings(
     *,
     mappings: Sequence[Mapping[str, Any]],
-    config: FileUploadConfig | None,
+    config: FileUploadConfig | None = None,
     tenant_id: str,
 ) -> Sequence[File]:
-    if not config:
-        return []
-
     files = [
         build_from_mapping(
             mapping=mapping,
@@ -102,13 +99,14 @@ def build_from_mappings(
     ]

     if (
+        config
         # If image config is set.
-        config.image_config
+        and config.image_config
         # And the number of image files exceeds the maximum limit
         and sum(1 for _ in (filter(lambda x: x.type == FileType.IMAGE, files))) > config.image_config.number_limits
     ):
         raise ValueError(f"Number of image files exceeds the maximum limit {config.image_config.number_limits}")
-    if config.number_limits and len(files) > config.number_limits:
+    if config and config.number_limits and len(files) > config.number_limits:
         raise ValueError(f"Number of files exceeds the maximum limit {config.number_limits}")

     return files
diff --git a/api/libs/helper.py b/api/libs/helper.py
index 023240a9a4de46..b98a4829e8b6e4 100644
--- a/api/libs/helper.py
+++ b/api/libs/helper.py
@@ -31,12 +31,12 @@ def output(self, key, obj):
         if obj is None:
             return None

-        from models.model import App, IconType
+        from models.model import App, IconType, Site

         if isinstance(obj, dict) and "app" in obj:
            obj = obj["app"]

-        if isinstance(obj, App) and obj.icon_type == IconType.IMAGE.value:
+        if isinstance(obj, App | Site) and obj.icon_type == IconType.IMAGE.value:
             return file_helpers.get_signed_file_url(obj.icon)
         return None
diff --git a/api/models/account.py b/api/models/account.py
index 6684e8dd6ec65d..951e836dec1873 100644
--- a/api/models/account.py
+++ b/api/models/account.py
@@ -56,8 +56,8 @@ def current_tenant(self, value: "Tenant"):
         self._current_tenant = tenant

     @property
-    def current_tenant_id(self):
-        return self._current_tenant.id
+    def current_tenant_id(self) -> str | None:
+        return self._current_tenant.id if self._current_tenant else None

     @current_tenant_id.setter
     def current_tenant_id(self, value: str):
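With current_tenant_id now typed Optional, callers can no longer assume a tenant is attached to the account. A stand-in illustration of the None-safe property (not the real model class, which is a DB entity):

```python
class Account:
    """Stand-in with just the relevant property."""

    def __init__(self, tenant=None):
        self._current_tenant = tenant

    @property
    def current_tenant_id(self) -> str | None:
        # Returns None when no tenant is attached, instead of raising
        # AttributeError on `None.id` as the old property effectively did.
        return self._current_tenant.id if self._current_tenant else None

print(Account().current_tenant_id)  # None
```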
@@ -108,6 +108,10 @@ def get_integrates(self) -> list[db.Model]:
     def is_admin_or_owner(self):
         return TenantAccountRole.is_privileged_role(self._current_tenant.current_role)

+    @property
+    def is_admin(self):
+        return TenantAccountRole.is_admin_role(self._current_tenant.current_role)
+
     @property
     def is_editor(self):
         return TenantAccountRole.is_editing_role(self._current_tenant.current_role)
@@ -147,6 +151,10 @@ def is_valid_role(role: str) -> bool:
     def is_privileged_role(role: str) -> bool:
         return role and role in {TenantAccountRole.OWNER, TenantAccountRole.ADMIN}

+    @staticmethod
+    def is_admin_role(role: str) -> bool:
+        return role and role == TenantAccountRole.ADMIN
+
     @staticmethod
     def is_non_owner_role(role: str) -> bool:
         return role and role in {
diff --git a/api/models/workflow.py b/api/models/workflow.py
index 5b0617828d0f7f..fd53f137f906bf 100644
--- a/api/models/workflow.py
+++ b/api/models/workflow.py
@@ -1,7 +1,7 @@
 import json
 from collections.abc import Mapping, Sequence
 from datetime import UTC, datetime
-from enum import Enum
+from enum import Enum, StrEnum
 from typing import Any, Optional, Union

 import sqlalchemy as sa
@@ -314,7 +314,7 @@ def conversation_variables(self, value: Sequence[Variable]) -> None:
     )


-class WorkflowRunStatus(Enum):
+class WorkflowRunStatus(StrEnum):
     """
     Workflow Run Status Enum
     """
@@ -393,13 +393,13 @@ class WorkflowRun(db.Model):
     version = db.Column(db.String(255), nullable=False)
     graph = db.Column(db.Text)
     inputs = db.Column(db.Text)
-    status = db.Column(db.String(255), nullable=False)
-    outputs: Mapped[str] = db.Column(db.Text)
+    status = db.Column(db.String(255), nullable=False)  # running, succeeded, failed, stopped
+    outputs: Mapped[str] = mapped_column(sa.Text, default="{}")
     error = db.Column(db.Text)
     elapsed_time = db.Column(db.Float, nullable=False, server_default=db.text("0"))
     total_tokens = db.Column(db.Integer, nullable=False, server_default=db.text("0"))
     total_steps = db.Column(db.Integer, server_default=db.text("0"))
-    created_by_role = db.Column(db.String(255), nullable=False)
+    created_by_role = db.Column(db.String(255), nullable=False)  # account, end_user
     created_by = db.Column(StringUUID, nullable=False)
     created_at = db.Column(db.DateTime, nullable=False, server_default=db.text("CURRENT_TIMESTAMP(0)"))
     finished_at = db.Column(db.DateTime)
diff --git a/api/poetry.lock b/api/poetry.lock
index cdfc293405f843..958673a00bf947 100644
--- a/api/poetry.lock
+++ b/api/poetry.lock
@@ -1422,36 +1422,40 @@ files = [

 [[package]]
 name = "chroma-hnswlib"
-version = "0.7.3"
+version = "0.7.6"
 description = "Chromas fork of hnswlib"
 optional = false
 python-versions = "*"
 files = [
-    {file = "chroma-hnswlib-0.7.3.tar.gz", hash = "sha256:b6137bedde49fffda6af93b0297fe00429fc61e5a072b1ed9377f909ed95a932"},
-    {file = "chroma_hnswlib-0.7.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:59d6a7c6f863c67aeb23e79a64001d537060b6995c3eca9a06e349ff7b0998ca"},
-    {file = "chroma_hnswlib-0.7.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d71a3f4f232f537b6152947006bd32bc1629a8686df22fd97777b70f416c127a"},
-    {file = "chroma_hnswlib-0.7.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c92dc1ebe062188e53970ba13f6b07e0ae32e64c9770eb7f7ffa83f149d4210"},
-    {file = "chroma_hnswlib-0.7.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49da700a6656fed8753f68d44b8cc8ae46efc99fc8a22a6d970dc1697f49b403"},
-    {file = "chroma_hnswlib-0.7.3-cp310-cp310-win_amd64.whl", hash = "sha256:108bc4c293d819b56476d8f7865803cb03afd6ca128a2a04d678fffc139af029"},
-    {file = "chroma_hnswlib-0.7.3-cp311-cp311-macosx_10_9_x86_64.whl", hash =
"sha256:11e7ca93fb8192214ac2b9c0943641ac0daf8f9d4591bb7b73be808a83835667"}, - {file = "chroma_hnswlib-0.7.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6f552e4d23edc06cdeb553cdc757d2fe190cdeb10d43093d6a3319f8d4bf1c6b"}, - {file = "chroma_hnswlib-0.7.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f96f4d5699e486eb1fb95849fe35ab79ab0901265805be7e60f4eaa83ce263ec"}, - {file = "chroma_hnswlib-0.7.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:368e57fe9ebae05ee5844840fa588028a023d1182b0cfdb1d13f607c9ea05756"}, - {file = "chroma_hnswlib-0.7.3-cp311-cp311-win_amd64.whl", hash = "sha256:b7dca27b8896b494456db0fd705b689ac6b73af78e186eb6a42fea2de4f71c6f"}, - {file = "chroma_hnswlib-0.7.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:70f897dc6218afa1d99f43a9ad5eb82f392df31f57ff514ccf4eeadecd62f544"}, - {file = "chroma_hnswlib-0.7.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5aef10b4952708f5a1381c124a29aead0c356f8d7d6e0b520b778aaa62a356f4"}, - {file = "chroma_hnswlib-0.7.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ee2d8d1529fca3898d512079144ec3e28a81d9c17e15e0ea4665697a7923253"}, - {file = "chroma_hnswlib-0.7.3-cp37-cp37m-win_amd64.whl", hash = "sha256:a4021a70e898783cd6f26e00008b494c6249a7babe8774e90ce4766dd288c8ba"}, - {file = "chroma_hnswlib-0.7.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a8f61fa1d417fda848e3ba06c07671f14806a2585272b175ba47501b066fe6b1"}, - {file = "chroma_hnswlib-0.7.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d7563be58bc98e8f0866907368e22ae218d6060601b79c42f59af4eccbbd2e0a"}, - {file = "chroma_hnswlib-0.7.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:51b8d411486ee70d7b66ec08cc8b9b6620116b650df9c19076d2d8b6ce2ae914"}, - {file = "chroma_hnswlib-0.7.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d706782b628e4f43f1b8a81e9120ac486837fbd9bcb8ced70fe0d9b95c72d77"}, - {file = "chroma_hnswlib-0.7.3-cp38-cp38-win_amd64.whl", hash = "sha256:54f053dedc0e3ba657f05fec6e73dd541bc5db5b09aa8bc146466ffb734bdc86"}, - {file = "chroma_hnswlib-0.7.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e607c5a71c610a73167a517062d302c0827ccdd6e259af6e4869a5c1306ffb5d"}, - {file = "chroma_hnswlib-0.7.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c2358a795870156af6761890f9eb5ca8cade57eb10c5f046fe94dae1faa04b9e"}, - {file = "chroma_hnswlib-0.7.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7cea425df2e6b8a5e201fff0d922a1cc1d165b3cfe762b1408075723c8892218"}, - {file = "chroma_hnswlib-0.7.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:454df3dd3e97aa784fba7cf888ad191e0087eef0fd8c70daf28b753b3b591170"}, - {file = "chroma_hnswlib-0.7.3-cp39-cp39-win_amd64.whl", hash = "sha256:df587d15007ca701c6de0ee7d5585dd5e976b7edd2b30ac72bc376b3c3f85882"}, + {file = "chroma_hnswlib-0.7.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f35192fbbeadc8c0633f0a69c3d3e9f1a4eab3a46b65458bbcbcabdd9e895c36"}, + {file = "chroma_hnswlib-0.7.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6f007b608c96362b8f0c8b6b2ac94f67f83fcbabd857c378ae82007ec92f4d82"}, + {file = "chroma_hnswlib-0.7.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:456fd88fa0d14e6b385358515aef69fc89b3c2191706fd9aee62087b62aad09c"}, + {file = "chroma_hnswlib-0.7.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:5dfaae825499c2beaa3b75a12d7ec713b64226df72a5c4097203e3ed532680da"}, + {file = "chroma_hnswlib-0.7.6-cp310-cp310-win_amd64.whl", hash = "sha256:2487201982241fb1581be26524145092c95902cb09fc2646ccfbc407de3328ec"}, + {file = "chroma_hnswlib-0.7.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:81181d54a2b1e4727369486a631f977ffc53c5533d26e3d366dda243fb0998ca"}, + {file = "chroma_hnswlib-0.7.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4b4ab4e11f1083dd0a11ee4f0e0b183ca9f0f2ed63ededba1935b13ce2b3606f"}, + {file = "chroma_hnswlib-0.7.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:53db45cd9173d95b4b0bdccb4dbff4c54a42b51420599c32267f3abbeb795170"}, + {file = "chroma_hnswlib-0.7.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5c093f07a010b499c00a15bc9376036ee4800d335360570b14f7fe92badcdcf9"}, + {file = "chroma_hnswlib-0.7.6-cp311-cp311-win_amd64.whl", hash = "sha256:0540b0ac96e47d0aa39e88ea4714358ae05d64bbe6bf33c52f316c664190a6a3"}, + {file = "chroma_hnswlib-0.7.6-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e87e9b616c281bfbe748d01705817c71211613c3b063021f7ed5e47173556cb7"}, + {file = "chroma_hnswlib-0.7.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ec5ca25bc7b66d2ecbf14502b5729cde25f70945d22f2aaf523c2d747ea68912"}, + {file = "chroma_hnswlib-0.7.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:305ae491de9d5f3c51e8bd52d84fdf2545a4a2bc7af49765cda286b7bb30b1d4"}, + {file = "chroma_hnswlib-0.7.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:822ede968d25a2c88823ca078a58f92c9b5c4142e38c7c8b4c48178894a0a3c5"}, + {file = "chroma_hnswlib-0.7.6-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:2fe6ea949047beed19a94b33f41fe882a691e58b70c55fdaa90274ae78be046f"}, + {file = "chroma_hnswlib-0.7.6-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:feceff971e2a2728c9ddd862a9dd6eb9f638377ad98438876c9aeac96c9482f5"}, + {file = "chroma_hnswlib-0.7.6-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb0633b60e00a2b92314d0bf5bbc0da3d3320be72c7e3f4a9b19f4609dc2b2ab"}, + {file = "chroma_hnswlib-0.7.6-cp37-cp37m-win_amd64.whl", hash = "sha256:a566abe32fab42291f766d667bdbfa234a7f457dcbd2ba19948b7a978c8ca624"}, + {file = "chroma_hnswlib-0.7.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6be47853d9a58dedcfa90fc846af202b071f028bbafe1d8711bf64fe5a7f6111"}, + {file = "chroma_hnswlib-0.7.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3a7af35bdd39a88bffa49f9bb4bf4f9040b684514a024435a1ef5cdff980579d"}, + {file = "chroma_hnswlib-0.7.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a53b1f1551f2b5ad94eb610207bde1bb476245fc5097a2bec2b476c653c58bde"}, + {file = "chroma_hnswlib-0.7.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3085402958dbdc9ff5626ae58d696948e715aef88c86d1e3f9285a88f1afd3bc"}, + {file = "chroma_hnswlib-0.7.6-cp38-cp38-win_amd64.whl", hash = "sha256:77326f658a15adfb806a16543f7db7c45f06fd787d699e643642d6bde8ed49c4"}, + {file = "chroma_hnswlib-0.7.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:93b056ab4e25adab861dfef21e1d2a2756b18be5bc9c292aa252fa12bb44e6ae"}, + {file = "chroma_hnswlib-0.7.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fe91f018b30452c16c811fd6c8ede01f84e5a9f3c23e0758775e57f1c3778871"}, + {file = "chroma_hnswlib-0.7.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:e6c0e627476f0f4d9e153420d36042dd9c6c3671cfd1fe511c0253e38c2a1039"}, + {file = "chroma_hnswlib-0.7.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e9796a4536b7de6c6d76a792ba03e08f5aaa53e97e052709568e50b4d20c04f"}, + {file = "chroma_hnswlib-0.7.6-cp39-cp39-win_amd64.whl", hash = "sha256:d30e2db08e7ffdcc415bd072883a322de5995eb6ec28a8f8c054103bbd3ec1e0"}, + {file = "chroma_hnswlib-0.7.6.tar.gz", hash = "sha256:4dce282543039681160259d29fcde6151cc9106c6461e0485f57cdccd83059b7"}, ] [package.dependencies] @@ -1459,26 +1463,26 @@ numpy = "*" [[package]] name = "chromadb" -version = "0.5.1" +version = "0.5.20" description = "Chroma." optional = false python-versions = ">=3.8" files = [ - {file = "chromadb-0.5.1-py3-none-any.whl", hash = "sha256:61f1f75a672b6edce7f1c8875c67e2aaaaf130dc1c1684431fbc42ad7240d01d"}, - {file = "chromadb-0.5.1.tar.gz", hash = "sha256:e2b2b6a34c2a949bedcaa42fa7775f40c7f6667848fc8094dcbf97fc0d30bee7"}, + {file = "chromadb-0.5.20-py3-none-any.whl", hash = "sha256:9550ba1b6dce911e35cac2568b301badf4b42f457b99a432bdeec2b6b9dd3680"}, + {file = "chromadb-0.5.20.tar.gz", hash = "sha256:19513a23b2d20059866216bfd80195d1d4a160ffba234b8899f5e80978160ca7"}, ] [package.dependencies] bcrypt = ">=4.0.1" build = ">=1.0.3" -chroma-hnswlib = "0.7.3" +chroma-hnswlib = "0.7.6" fastapi = ">=0.95.2" grpcio = ">=1.58.0" httpx = ">=0.27.0" importlib-resources = "*" kubernetes = ">=28.1.0" mmh3 = ">=4.0.1" -numpy = ">=1.22.5,<2.0.0" +numpy = ">=1.22.5" onnxruntime = ">=1.14.1" opentelemetry-api = ">=1.2.0" opentelemetry-exporter-otlp-proto-grpc = ">=1.2.0" @@ -1490,7 +1494,7 @@ posthog = ">=2.4.0" pydantic = ">=1.9" pypika = ">=0.48.9" PyYAML = ">=6.0.0" -requests = ">=2.28" +rich = ">=10.11.0" tenacity = ">=8.2.3" tokenizers = ">=0.13.2" tqdm = ">=4.65.0" @@ -11022,4 +11026,4 @@ cffi = ["cffi (>=1.11)"] [metadata] lock-version = "2.0" python-versions = ">=3.11,<3.13" -content-hash = "75175c3427d13c41d84374ff2bb6f5c6cb157e3783107f9d22fad15c9eb8c177" +content-hash = "983ba4f2cb89f0c867fc50cb48677cad9343f7f0828c7082cb0b5cf171d716fb" diff --git a/api/pyproject.toml b/api/pyproject.toml index 6dbb16d820496f..79857f81635eb0 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -242,7 +242,7 @@ tos = "~2.7.1" [tool.poetry.group.vdb.dependencies] alibabacloud_gpdb20160503 = "~3.8.0" alibabacloud_tea_openapi = "~0.3.9" -chromadb = "0.5.1" +chromadb = "0.5.20" clickhouse-connect = "~0.7.16" couchbase = "~4.3.0" elasticsearch = "8.14.0" diff --git a/api/services/conversation_service.py b/api/services/conversation_service.py index f3e76d3300dda2..8642972710fd1f 100644 --- a/api/services/conversation_service.py +++ b/api/services/conversation_service.py @@ -1,3 +1,4 @@ +from collections.abc import Callable from datetime import UTC, datetime from typing import Optional, Union @@ -74,14 +75,14 @@ def pagination_by_last_id( return InfiniteScrollPagination(data=conversations, limit=limit, has_more=has_more) @classmethod - def _get_sort_params(cls, sort_by: str) -> tuple[str, callable]: + def _get_sort_params(cls, sort_by: str): if sort_by.startswith("-"): return sort_by[1:], desc return sort_by, asc @classmethod def _build_filter_condition( - cls, sort_field: str, sort_direction: callable, reference_conversation: Conversation, is_next_page: bool = False + cls, sort_field: str, sort_direction: Callable, reference_conversation: Conversation, is_next_page: bool = False ): field_value = getattr(reference_conversation, sort_field) if (sort_direction == desc and not 
is_next_page) or (sort_direction == asc and is_next_page):
@@ -160,5 +161,5 @@ def delete(cls, app_model: App, conversation_id: str, user: Optional[Union[Accou
         conversation = cls.get_conversation(app_model, conversation_id, user)

         conversation.is_deleted = True
-        conversation.updated_at = datetime.now(timezone.utc).replace(tzinfo=None)
+        conversation.updated_at = datetime.now(UTC).replace(tzinfo=None)
         db.session.commit()
diff --git a/api/services/workflow_service.py b/api/services/workflow_service.py
index fde8673ff5779b..aa2babd7f7806b 100644
--- a/api/services/workflow_service.py
+++ b/api/services/workflow_service.py
@@ -262,13 +262,17 @@ def run_draft_workflow_node(

         if run_succeeded and node_run_result:
             # create workflow node execution
-            workflow_node_execution.inputs = json.dumps(node_run_result.inputs) if node_run_result.inputs else None
-            workflow_node_execution.process_data = (
-                json.dumps(node_run_result.process_data) if node_run_result.process_data else None
-            )
-            workflow_node_execution.outputs = (
-                json.dumps(jsonable_encoder(node_run_result.outputs)) if node_run_result.outputs else None
+            inputs = WorkflowEntry.handle_special_values(node_run_result.inputs) if node_run_result.inputs else None
+            process_data = (
+                WorkflowEntry.handle_special_values(node_run_result.process_data)
+                if node_run_result.process_data
+                else None
             )
+            outputs = WorkflowEntry.handle_special_values(node_run_result.outputs) if node_run_result.outputs else None
+
+            workflow_node_execution.inputs = json.dumps(inputs)
+            workflow_node_execution.process_data = json.dumps(process_data)
+            workflow_node_execution.outputs = json.dumps(outputs)
             workflow_node_execution.execution_metadata = (
                 json.dumps(jsonable_encoder(node_run_result.metadata)) if node_run_result.metadata else None
             )
@@ -303,10 +307,10 @@ def convert_to_workflow(self, app_model: App, account: Account, args: dict) -> A
         new_app = workflow_converter.convert_to_workflow(
             app_model=app_model,
             account=account,
-            name=args.get("name"),
-            icon_type=args.get("icon_type"),
-            icon=args.get("icon"),
-            icon_background=args.get("icon_background"),
+            name=args.get("name", "Default Name"),
+            icon_type=args.get("icon_type", "emoji"),
+            icon=args.get("icon", "🤖"),
+            icon_background=args.get("icon_background", "#FFEAD5"),
         )

         return new_app
diff --git a/api/tasks/add_document_to_index_task.py b/api/tasks/add_document_to_index_task.py
index 81531c9590d978..09be6612160471 100644
--- a/api/tasks/add_document_to_index_task.py
+++ b/api/tasks/add_document_to_index_task.py
@@ -18,9 +18,9 @@ def add_document_to_index_task(dataset_document_id: str):
     """
     Async Add document to index
-    :param document_id:
+    :param dataset_document_id:

-    Usage: add_document_to_index.delay(document_id)
+    Usage: add_document_to_index.delay(dataset_document_id)
     """
     logging.info(click.style("Start add document to index: {}".format(dataset_document_id), fg="green"))
     start_at = time.perf_counter()
diff --git a/docker-legacy/docker-compose.chroma.yaml b/docker-legacy/docker-compose.chroma.yaml
index a943d620c085f0..63354305deff40 100644
--- a/docker-legacy/docker-compose.chroma.yaml
+++ b/docker-legacy/docker-compose.chroma.yaml
@@ -1,7 +1,7 @@
 services:
   # Chroma vector store.
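For reference, the conversation_service fix above swaps the no-longer-imported `timezone.utc` for the `UTC` alias the module now imports; a tiny standalone check of the equivalent behavior (Python 3.11+, matching the project's `>=3.11` requirement):

```python
# datetime.UTC is an alias of datetime.timezone.utc; stripping tzinfo yields
# the naive UTC timestamp the conversations table stores.
from datetime import UTC, datetime, timezone

assert UTC is timezone.utc
naive_utc_now = datetime.now(UTC).replace(tzinfo=None)
print(naive_utc_now.tzinfo)  # None
```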
chroma: - image: ghcr.io/chroma-core/chroma:0.5.1 + image: ghcr.io/chroma-core/chroma:0.5.20 restart: always volumes: - ./volumes/chroma:/chroma/chroma diff --git a/docker-legacy/docker-compose.yaml b/docker-legacy/docker-compose.yaml index 7bf2cd47087e51..aaca3c9c12b74d 100644 --- a/docker-legacy/docker-compose.yaml +++ b/docker-legacy/docker-compose.yaml @@ -2,7 +2,7 @@ version: '3' services: # API service api: - image: langgenius/dify-api:0.11.2 + image: langgenius/dify-api:0.12.1 restart: always environment: # Startup mode, 'api' starts the API server. @@ -227,7 +227,7 @@ services: # worker service # The Celery worker for processing the queue. worker: - image: langgenius/dify-api:0.11.2 + image: langgenius/dify-api:0.12.1 restart: always environment: CONSOLE_WEB_URL: '' @@ -397,7 +397,7 @@ services: # Frontend web application. web: - image: langgenius/dify-web:0.11.2 + image: langgenius/dify-web:0.12.1 restart: always environment: # The base URL of console application api server, refers to the Console base URL of WEB service if console domain is diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index 285f576b0e83c9..3e2b276c92d66e 100644 --- a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -291,7 +291,7 @@ x-shared-env: &shared-api-worker-env services: # API service api: - image: langgenius/dify-api:0.11.2 + image: langgenius/dify-api:0.12.1 restart: always environment: # Use the shared environment variables. @@ -311,7 +311,7 @@ services: # worker service # The Celery worker for processing the queue. worker: - image: langgenius/dify-api:0.11.2 + image: langgenius/dify-api:0.12.1 restart: always environment: # Use the shared environment variables. @@ -330,7 +330,7 @@ services: # Frontend web application. web: - image: langgenius/dify-web:0.11.2 + image: langgenius/dify-web:0.12.1 restart: always environment: CONSOLE_API_URL: ${CONSOLE_API_URL:-} @@ -605,7 +605,7 @@ services: # Chroma vector database chroma: - image: ghcr.io/chroma-core/chroma:0.5.1 + image: ghcr.io/chroma-core/chroma:0.5.20 profiles: - chroma restart: always diff --git a/web/app/components/base/ga/index.tsx b/web/app/components/base/ga/index.tsx index 219724113f681d..0015edbfca9368 100644 --- a/web/app/components/base/ga/index.tsx +++ b/web/app/components/base/ga/index.tsx @@ -47,6 +47,12 @@ gtag('config', '${gaIdMaps[gaType]}'); nonce={nonce!} > + {/* Cookie banner */} + ) diff --git a/web/app/components/develop/template/template_workflow.en.mdx b/web/app/components/develop/template/template_workflow.en.mdx index be2ef547430852..e76611eb0784af 100644 --- a/web/app/components/develop/template/template_workflow.en.mdx +++ b/web/app/components/develop/template/template_workflow.en.mdx @@ -54,7 +54,7 @@ Workflow applications offers non-session support and is ideal for translation, a User identifier, used to define the identity of the end-user for retrieval and statistics. Should be uniquely defined by the developer within the application. - `files` (array[object]) Optional - File list, suitable for inputting files combined with text understanding and answering questions, available only when the model supports Vision capability. + File list, suitable for inputting files combined with text understanding and answering questions, available only when the model supports file parsing and understanding capability. 
- `type` (string) Supported type: - `document` ('TXT', 'MD', 'MARKDOWN', 'PDF', 'HTML', 'XLSX', 'XLS', 'DOCX', 'CSV', 'EML', 'MSG', 'PPTX', 'PPT', 'XML', 'EPUB') - `image` ('JPG', 'JPEG', 'PNG', 'GIF', 'WEBP', 'SVG') @@ -188,6 +188,19 @@ Workflow applications offers non-session support and is ideal for translation, a }' ``` + + + ```json {{ title: 'File variable example' }} + { + "inputs": { + "{variable_name}": { + "transfer_method": "local_file", + "upload_file_id": "{upload_file_id}", + "type": "{document_type}" + } + } + } + ``` ### Blocking Mode @@ -223,7 +236,88 @@ Workflow applications offers non-session support and is ideal for translation, a data: {"event": "tts_message_end", "conversation_id": "23dd85f3-1a41-4ea0-b7a9-062734ccfaf9", "message_id": "a8bdc41c-13b2-4c18-bfd9-054b9803038c", "created_at": 1721205487, "task_id": "3bf8a0bb-e73b-4690-9e66-4e429bad8ee7", "audio": ""} ``` + + ```json {{ title: 'File upload sample code' }} + { + import requests + import json + + def upload_file(file_path, user): + upload_url = "https://api.dify.ai/v1/files/upload" + headers = { + "Authorization": "Bearer app-xxxxxxxx", + } + + try: + print("Upload file...") + with open(file_path, 'rb') as file: + files = { + 'file': (file_path, file, 'text/plain') # Make sure the file is uploaded with the appropriate MIME type + } + data = { + "user": user, + "type": "TXT" # Set the file type to TXT + } + + response = requests.post(upload_url, headers=headers, files=files, data=data) + if response.status_code == 201: # 201 means creation is successful + print("File uploaded successfully") + return response.json().get("id") # Get the uploaded file ID + else: + print(f"File upload failed, status code: {response.status_code}") + return None + except Exception as e: + print(f"Error occurred: {str(e)}") + return None + + def run_workflow(file_id, user, response_mode="blocking"): + workflow_url = "https://api.dify.ai/v1/workflows/run" + headers = { + "Authorization": "Bearer app-xxxxxxxxx", + "Content-Type": "application/json" + } + data = { + "inputs": { + "orig_mail": { + "transfer_method": "local_file", + "upload_file_id": file_id, + "type": "document" + } + }, + "response_mode": response_mode, + "user": user + } + + try: + print("Run Workflow...") + response = requests.post(workflow_url, headers=headers, json=data) + if response.status_code == 200: + print("Workflow execution successful") + return response.json() + else: + print(f"Workflow execution failed, status code: {response.status_code}") + return {"status": "error", "message": f"Failed to execute workflow, status code: {response.status_code}"} + except Exception as e: + print(f"Error occurred: {str(e)}") + return {"status": "error", "message": str(e)} + + # Usage Examples + file_path = "{your_file_path}" + user = "difyuser" + + # Upload files + file_id = upload_file(file_path, user) + if file_id: + # The file was uploaded successfully, and the workflow continues to run + result = run_workflow(file_id, user) + print(result) + else: + print("File upload failed and workflow cannot be executed") + + } + ``` + diff --git a/web/app/components/develop/template/template_workflow.ja.mdx b/web/app/components/develop/template/template_workflow.ja.mdx index ad669430f2d876..609f6a2891c42f 100644 --- a/web/app/components/develop/template/template_workflow.ja.mdx +++ b/web/app/components/develop/template/template_workflow.ja.mdx @@ -54,7 +54,7 @@ import { Row, Col, Properties, Property, Heading, SubProperty, Paragraph } from ユーザー識別子、エンドユーザーのアイデンティティを定義するために使用されます。 
アプリケーション内で開発者によって一意に定義される必要があります。 - `files` (array[object]) オプション - ファイルリスト、テキストの理解と質問への回答を組み合わせたファイルの入力に適しており、モデルがビジョン機能をサポートしている場合にのみ利用可能です。 + ファイルリストは、テキスト理解と質問への回答を組み合わせたファイルの入力に適しています。モデルがファイルの解析と理解機能をサポートしている場合にのみ使用できます。 - `type` (string) サポートされているタイプ: - `document` ('TXT', 'MD', 'MARKDOWN', 'PDF', 'HTML', 'XLSX', 'XLS', 'DOCX', 'CSV', 'EML', 'MSG', 'PPTX', 'PPT', 'XML', 'EPUB') - `image` ('JPG', 'JPEG', 'PNG', 'GIF', 'WEBP', 'SVG') @@ -188,6 +188,19 @@ import { Row, Col, Properties, Property, Heading, SubProperty, Paragraph } from }' ``` + + + ```json {{ title: 'ファイル変数の例' }} + { + "inputs": { + "{variable_name}": { + "transfer_method": "local_file", + "upload_file_id": "{upload_file_id}", + "type": "{document_type}" + } + } + } + ``` ### ブロッキングモード @@ -223,7 +236,88 @@ import { Row, Col, Properties, Property, Heading, SubProperty, Paragraph } from data: {"event": "tts_message_end", "conversation_id": "23dd85f3-1a41-4ea0-b7a9-062734ccfaf9", "message_id": "a8bdc41c-13b2-4c18-bfd9-054b9803038c", "created_at": 1721205487, "task_id": "3bf8a0bb-e73b-4690-9e66-4e429bad8ee7", "audio": ""} ``` + + ```json {{ title: 'ファイルアップロードのサンプルコード' }} + { + import requests + import json + + def upload_file(file_path, user): + upload_url = "https://api.dify.ai/v1/files/upload" + headers = { + "Authorization": "Bearer app-xxxxxxxx", + } + + try: + print("ファイルをアップロードしています...") + with open(file_path, 'rb') as file: + files = { + 'file': (file_path, file, 'text/plain') # ファイルが適切な MIME タイプでアップロードされていることを確認してください + } + data = { + "user": user, + "type": "TXT" # ファイルタイプをTXTに設定します + } + + response = requests.post(upload_url, headers=headers, files=files, data=data) + if response.status_code == 201: # 201 は作成が成功したことを意味します + print("ファイルが正常にアップロードされました") + return response.json().get("id") # アップロードされたファイルIDを取得する + else: + print(f"ファイルのアップロードに失敗しました。ステータス コード: {response.status_code}") + return None + except Exception as e: + print(f"エラーが発生しました: {str(e)}") + return None + + def run_workflow(file_id, user, response_mode="blocking"): + workflow_url = "https://api.dify.ai/v1/workflows/run" + headers = { + "Authorization": "Bearer app-xxxxxxxxx", + "Content-Type": "application/json" + } + data = { + "inputs": { + "orig_mail": { + "transfer_method": "local_file", + "upload_file_id": file_id, + "type": "document" + } + }, + "response_mode": response_mode, + "user": user + } + + try: + print("ワークフローを実行...") + response = requests.post(workflow_url, headers=headers, json=data) + if response.status_code == 200: + print("ワークフローが正常に実行されました") + return response.json() + else: + print(f"ワークフローの実行がステータス コードで失敗しました: {response.status_code}") + return {"status": "error", "message": f"Failed to execute workflow, status code: {response.status_code}"} + except Exception as e: + print(f"エラーが発生しました: {str(e)}") + return {"status": "error", "message": str(e)} + + # 使用例 + file_path = "{your_file_path}" + user = "difyuser" + + # ファイルをアップロードする + file_id = upload_file(file_path, user) + if file_id: + # ファイルは正常にアップロードされました。ワークフローの実行を続行します + result = run_workflow(file_id, user) + print(result) + else: + print("ファイルのアップロードに失敗し、ワークフローを実行できません") + + } + ``` + diff --git a/web/app/components/develop/template/template_workflow.zh.mdx b/web/app/components/develop/template/template_workflow.zh.mdx index 5b6fdb13816a5e..71c3d356753df3 100644 --- a/web/app/components/develop/template/template_workflow.zh.mdx +++ b/web/app/components/develop/template/template_workflow.zh.mdx @@ -52,7 +52,7 @@ Workflow 应用无会话支持,适合用于翻译/文章写作/总结 AI 等 用户标识,用于定义终端用户的身份,方便检索、统计。 
由开发者定义规则,需保证用户标识在应用内唯一。 - `files` (array[object]) Optional - 文件列表,适用于传入文件结合文本理解并回答问题,仅当模型支持 Vision 能力时可用。 + 文件列表,适用于传入文件结合文本理解并回答问题,仅当模型支持该类型文件解析能力时可用。 - `type` (string) 支持类型: - `document` 具体类型包含:'TXT', 'MD', 'MARKDOWN', 'PDF', 'HTML', 'XLSX', 'XLS', 'DOCX', 'CSV', 'EML', 'MSG', 'PPTX', 'PPT', 'XML', 'EPUB' - `image` 具体类型包含:'JPG', 'JPEG', 'PNG', 'GIF', 'WEBP', 'SVG' @@ -171,8 +171,7 @@ Workflow 应用无会话支持,适合用于翻译/文章写作/总结 AI 等 - - + ```bash {{ title: 'cURL' }} curl -X POST '${props.appDetail.api_base_url}/workflows/run' \ --header 'Authorization: Bearer {api_key}' \ @@ -183,7 +182,19 @@ Workflow 应用无会话支持,适合用于翻译/文章写作/总结 AI 等 "user": "abc-123" }' ``` - + + + ```json {{ title: 'File variable example' }} + { + "inputs": { + "{variable_name}": { + "transfer_method": "local_file", + "upload_file_id": "{upload_file_id}", + "type": "{document_type}" + } + } + } + ``` ### Blocking Mode @@ -219,7 +230,88 @@ Workflow 应用无会话支持,适合用于翻译/文章写作/总结 AI 等 data: {"event": "tts_message_end", "conversation_id": "23dd85f3-1a41-4ea0-b7a9-062734ccfaf9", "message_id": "a8bdc41c-13b2-4c18-bfd9-054b9803038c", "created_at": 1721205487, "task_id": "3bf8a0bb-e73b-4690-9e66-4e429bad8ee7", "audio": ""} ``` + + ```json {{ title: 'File upload sample code' }} + { + import requests + import json + + def upload_file(file_path, user): + upload_url = "https://api.dify.ai/v1/files/upload" + headers = { + "Authorization": "Bearer app-xxxxxxxx", + } + + try: + print("上传文件中...") + with open(file_path, 'rb') as file: + files = { + 'file': (file_path, file, 'text/plain') # 确保文件以适当的MIME类型上传 + } + data = { + "user": user, + "type": "TXT" # 设置文件类型为TXT + } + + response = requests.post(upload_url, headers=headers, files=files, data=data) + if response.status_code == 201: # 201 表示创建成功 + print("文件上传成功") + return response.json().get("id") # 获取上传的文件 ID + else: + print(f"文件上传失败,状态码: {response.status_code}") + return None + except Exception as e: + print(f"发生错误: {str(e)}") + return None + + def run_workflow(file_id, user, response_mode="blocking"): + workflow_url = "https://api.dify.ai/v1/workflows/run" + headers = { + "Authorization": "Bearer app-xxxxxxxxx", + "Content-Type": "application/json" + } + + data = { + "inputs": { + "orig_mail": { + "transfer_method": "local_file", + "upload_file_id": file_id, + "type": "document" + } + }, + "response_mode": response_mode, + "user": user + } + try: + print("运行工作流...") + response = requests.post(workflow_url, headers=headers, json=data) + if response.status_code == 200: + print("工作流执行成功") + return response.json() + else: + print(f"工作流执行失败,状态码: {response.status_code}") + return {"status": "error", "message": f"Failed to execute workflow, status code: {response.status_code}"} + except Exception as e: + print(f"发生错误: {str(e)}") + return {"status": "error", "message": str(e)} + + # 使用示例 + file_path = "{your_file_path}" + user = "difyuser" + + # 上传文件 + file_id = upload_file(file_path, user) + if file_id: + # 文件上传成功,继续运行工作流 + result = run_workflow(file_id, user) + print(result) + else: + print("文件上传失败,无法执行工作流") + + } + ``` + diff --git a/web/app/components/explore/app-list/index.tsx b/web/app/components/explore/app-list/index.tsx index 6186d8164d676d..b8e7939328ce30 100644 --- a/web/app/components/explore/app-list/index.tsx +++ b/web/app/components/explore/app-list/index.tsx @@ -14,7 +14,7 @@ import type { App } from '@/models/explore' import Category from '@/app/components/explore/category' import AppCard from '@/app/components/explore/app-card' import { fetchAppDetail, fetchAppList } from '@/service/explore' 
-import { importApp } from '@/service/apps' +import { importDSL } from '@/service/apps' import { useTabSearchParams } from '@/hooks/use-tab-searchparams' import CreateAppModal from '@/app/components/explore/create-app-modal' import AppTypeSelector from '@/app/components/app/type-selector' @@ -24,6 +24,7 @@ import { NEED_REFRESH_APP_LIST_KEY } from '@/config' import { useAppContext } from '@/context/app-context' import { getRedirection } from '@/utils/app-redirection' import Input from '@/app/components/base/input' +import { DSLImportMode } from '@/models/app' type AppsProps = { pageType?: PageType @@ -127,8 +128,9 @@ const Apps = ({ currApp?.app.id as string, ) try { - const app = await importApp({ - data: export_data, + const app = await importDSL({ + mode: DSLImportMode.YAML_CONTENT, + yaml_content: export_data, name, icon_type, icon, @@ -143,7 +145,7 @@ const Apps = ({ if (onSuccess) onSuccess() localStorage.setItem(NEED_REFRESH_APP_LIST_KEY, '1') - getRedirection(isCurrentWorkspaceEditor, app, push) + getRedirection(isCurrentWorkspaceEditor, { id: app.app_id }, push) } catch (e) { Toast.notify({ type: 'error', message: t('app.newApp.appCreateFailed') }) diff --git a/web/app/components/workflow/hooks/use-workflow-template.ts b/web/app/components/workflow/hooks/use-workflow-template.ts index e36f0b61f9b26f..c2dc956b6316d2 100644 --- a/web/app/components/workflow/hooks/use-workflow-template.ts +++ b/web/app/components/workflow/hooks/use-workflow-template.ts @@ -22,6 +22,7 @@ export const useWorkflowTemplate = () => { ...nodesInitialData.llm, memory: { window: { enabled: false, size: 10 }, + query_prompt_template: '{{#sys.query#}}', }, selected: true, }, diff --git a/web/app/components/workflow/nodes/_base/components/before-run-form/index.tsx b/web/app/components/workflow/nodes/_base/components/before-run-form/index.tsx index 6a3da3cf24d20e..79d9c5b4dd8217 100644 --- a/web/app/components/workflow/nodes/_base/components/before-run-form/index.tsx +++ b/web/app/components/workflow/nodes/_base/components/before-run-form/index.tsx @@ -16,6 +16,7 @@ import { InputVarType, NodeRunningStatus } from '@/app/components/workflow/types import ResultPanel from '@/app/components/workflow/run/result-panel' import Toast from '@/app/components/base/toast' import { TransferMethod } from '@/types/app' +import { getProcessedFiles } from '@/app/components/base/file-uploader/utils' const i18nPrefix = 'workflow.singleRun' @@ -39,6 +40,11 @@ function formatValue(value: string | any, type: InputVarType) { return JSON.parse(item) }) } + if (type === InputVarType.multiFiles) + return getProcessedFiles(value) + + if (type === InputVarType.singleFile) + return getProcessedFiles([value])[0] return value } diff --git a/web/app/signin/layout.tsx b/web/app/signin/layout.tsx index 342876bc53ec22..b404c5c4de5f77 100644 --- a/web/app/signin/layout.tsx +++ b/web/app/signin/layout.tsx @@ -1,25 +1,10 @@ -import Script from 'next/script' import Header from './_header' import style from './page.module.css' import cn from '@/utils/classnames' -import { IS_CE_EDITION } from '@/config' export default async function SignInLayout({ children }: any) { return <> - {!IS_CE_EDITION && ( - <> - - - - )} -
=18.17.0" diff --git a/web/themes/dark.css b/web/themes/dark.css index f89c59190d5e97..892f48fa482c17 100644 --- a/web/themes/dark.css +++ b/web/themes/dark.css @@ -85,6 +85,10 @@ html[data-theme="dark"] { --color-components-button-secondary-accent-border-hover: #FFFFFF1F; --color-components-button-secondary-accent-border-disabled: #FFFFFF0D; + --color-components-button-indigo-bg: #444CE7; + --color-components-button-indigo-bg-hover: #6172F3; + --color-components-button-indigo-bg-disabled: #FFFFFF08; + --color-components-checkbox-icon: #FFFFFFF2; --color-components-checkbox-icon-disabled: #FFFFFF33; --color-components-checkbox-bg: #296DFF; @@ -95,10 +99,11 @@ html[data-theme="dark"] { --color-components-checkbox-border-disabled: #FFFFFF03; --color-components-checkbox-bg-unchecked: #FFFFFF08; --color-components-checkbox-bg-unchecked-hover: #FFFFFF0D; + --color-components-checkbox-bg-disabled-checked: #155AEF33; --color-components-radio-border-checked: #296DFF; --color-components-radio-border-checked-hover: #5289FF; - --color-components-radio-border-checked-disabled: #FFFFFF14; + --color-components-radio-border-checked-disabled: #155AEF33; --color-components-radio-bg-disabled: #FFFFFF08; --color-components-radio-border: #FFFFFF66; --color-components-radio-border-hover: #FFFFFF99; @@ -135,6 +140,9 @@ html[data-theme="dark"] { --color-components-panel-on-panel-item-bg: #27272B; --color-components-panel-on-panel-item-bg-hover: #3A3A40; --color-components-panel-on-panel-item-bg-alt: #3A3A40; + --color-components-panel-on-panel-item-bg-transparent: #2C2C30F2; + --color-components-panel-on-panel-item-bg-hover-transparent: #3A3A4000; + --color-components-panel-on-panel-item-bg-destructive-hover-transparent: #FFFBFA00; --color-components-panel-bg-transparent: #22222500; @@ -208,10 +216,12 @@ html[data-theme="dark"] { --color-components-actionbar-bg: #222225; --color-components-actionbar-border: #C8CEDA14; + --color-components-actionbar-bg-accent: #27272B; + --color-components-actionbar-border-accent: #5289FF; --color-components-dropzone-bg-alt: #18181BCC; --color-components-dropzone-bg: #18181B66; - --color-components-dropzone-bg-accent: #155AEF24; + --color-components-dropzone-bg-accent: #155AEF33; --color-components-dropzone-border: #C8CEDA24; --color-components-dropzone-border-alt: #C8CEDA33; --color-components-dropzone-border-accent: #84ABFF; @@ -228,6 +238,14 @@ html[data-theme="dark"] { --color-components-progress-gray-border: #98A2B2; --color-components-progress-gray-bg: #C8CEDA05; + --color-components-progress-warning-progress: #FDB022; + --color-components-progress-warning-border: #FDB022; + --color-components-progress-warning-bg: #F790090A; + + --color-components-progress-error-progress: #F97066; + --color-components-progress-error-border: #F97066; + --color-components-progress-error-bg: #F044380A; + --color-components-chat-input-audio-bg: #155AEF33; --color-components-chat-input-audio-wave-default: #C8CEDA24; --color-components-chat-input-bg-mask-1: #18181B0A; @@ -236,13 +254,103 @@ html[data-theme="dark"] { --color-components-chat-input-audio-wave-active: #84ABFF; --color-components-chat-input-audio-bg-alt: #18181BE5; - --color-components-Avatar-shape-fill-stop-0: #FFFFFFF2; - --color-components-Avatar-shape-fill-stop-100: #FFFFFFCC; - - --color-components-Avatar-bg-mask-stop-0: #FFFFFF33; - --color-components-Avatar-bg-mask-stop-100: #FFFFFF08; - - --color-components-Avatar-default-avatar-bg: #222225; + --color-components-avatar-shape-fill-stop-0: #FFFFFFF2; + 
--color-components-avatar-shape-fill-stop-100: #FFFFFFCC; + + --color-components-avatar-bg-mask-stop-0: #FFFFFF33; + --color-components-avatar-bg-mask-stop-100: #FFFFFF08; + + --color-components-avatar-default-avatar-bg: #222225; + --color-components-avatar-mask-darkmode-dimmed: #0000001F; + + --color-components-label-gray: #C8CEDA24; + + --color-components-premium-badge-blue-bg-stop-0: #5289FF; + --color-components-premium-badge-blue-bg-stop-100: #296DFF; + --color-components-premium-badge-blue-stroke-stop-0: #FFFFFF33; + --color-components-premium-badge-blue-stroke-stop-100: #296DFF; + --color-components-premium-badge-blue-text-stop-0: #EFF4FF; + --color-components-premium-badge-blue-text-stop-100: #B2CAFF; + --color-components-premium-badge-blue-glow: #004AEB; + --color-components-premium-badge-blue-bg-stop-0-hover: #84ABFF; + --color-components-premium-badge-blue-bg-stop-100-hover: #004AEB; + --color-components-premium-badge-blue-glow-hover: #D1E0FF; + --color-components-premium-badge-blue-stroke-stop-0-hover: #FFFFFF80; + --color-components-premium-badge-blue-stroke-stop-100-hover: #296DFF; + + --color-components-premium-badge-highlight-stop-0: #FFFFFF1F; + --color-components-premium-badge-highlight-stop-100: #FFFFFF33; + --color-components-premium-badge-indigo-bg-stop-0: #6172F3; + --color-components-premium-badge-indigo-bg-stop-100: #3538CD; + --color-components-premium-badge-indigo-stroke-stop-0: #FFFFFF33; + --color-components-premium-badge-indigo-stroke-stop-100: #444CE7; + --color-components-premium-badge-indigo-text-stop-0: #EEF4FF; + --color-components-premium-badge-indigo-text-stop-100: #C7D7FE; + --color-components-premium-badge-indigo-glow: #3538CD; + --color-components-premium-badge-indigo-glow-hover: #E0EAFF; + --color-components-premium-badge-indigo-bg-stop-0-hover: #A4BCFD; + --color-components-premium-badge-indigo-bg-stop-100-hover: #3538CD; + --color-components-premium-badge-indigo-stroke-stop-0-hover: #FFFFFF80; + --color-components-premium-badge-indigo-stroke-stop-100-hover: #444CE7; + + --color-components-premium-badge-grey-bg-stop-0: #676F83; + --color-components-premium-badge-grey-bg-stop-100: #495464; + --color-components-premium-badge-grey-stroke-stop-0: #FFFFFF1F; + --color-components-premium-badge-grey-stroke-stop-100: #495464; + --color-components-premium-badge-grey-text-stop-0: #F9FAFB; + --color-components-premium-badge-grey-text-stop-100: #E9EBF0; + --color-components-premium-badge-grey-glow: #354052; + --color-components-premium-badge-grey-glow-hover: #F2F4F7; + --color-components-premium-badge-grey-bg-stop-0-hover: #98A2B2; + --color-components-premium-badge-grey-bg-stop-100-hover: #354052; + --color-components-premium-badge-grey-stroke-stop-0-hover: #FFFFFF80; + --color-components-premium-badge-grey-stroke-stop-100-hover: #676F83; + + --color-components-premium-badge-orange-bg-stop-0: #FF692E; + --color-components-premium-badge-orange-bg-stop-100: #E04F16; + --color-components-premium-badge-orange-stroke-stop-0: #FFFFFF33; + --color-components-premium-badge-orange-stroke-stop-100: #FF4405; + --color-components-premium-badge-orange-text-stop-0: #FEF6EE; + --color-components-premium-badge-orange-text-stop-100: #F9DBAF; + --color-components-premium-badge-orange-glow: #B93815; + --color-components-premium-badge-orange-glow-hover: #FDEAD7; + --color-components-premium-badge-orange-bg-stop-0-hover: #FF692E; + --color-components-premium-badge-orange-bg-stop-100-hover: #B93815; + --color-components-premium-badge-orange-stroke-stop-0-hover: #FFFFFF80; + 
--color-components-premium-badge-orange-stroke-stop-100-hover: #FF4405; + + --color-components-progress-bar-bg: #C8CEDA14; + --color-components-progress-bar-progress: #C8CEDA24; + --color-components-progress-bar-border: #FFFFFF08; + --color-components-progress-bar-progress-solid: #FFFFFFF2; + --color-components-progress-bar-progress-highlight: #C8CEDA33; + + --color-components-icon-bg-red-solid: #D92D20; + --color-components-icon-bg-rose-solid: #E31B54; + --color-components-icon-bg-pink-solid: #DD2590; + --color-components-icon-bg-orange-dark-solid: #FF4405; + --color-components-icon-bg-yellow-solid: #EAAA08; + --color-components-icon-bg-green-solid: #4CA30D; + --color-components-icon-bg-teal-solid: #0E9384; + --color-components-icon-bg-blue-light-solid: #0BA5EC; + --color-components-icon-bg-blue-solid: #155AEF; + --color-components-icon-bg-indigo-solid: #444CE7; + --color-components-icon-bg-violet-solid: #7839EE; + --color-components-icon-bg-midnight-solid: #5D698D; + --color-components-icon-bg-rose-soft: #F63D6833; + --color-components-icon-bg-pink-soft: #EE46BC33; + --color-components-icon-bg-orange-dark-soft: #FF440533; + --color-components-icon-bg-yellow-soft: #EAAA0833; + --color-components-icon-bg-green-soft: #66C61C33; + --color-components-icon-bg-teal-soft: #15B79E33; + --color-components-icon-bg-blue-light-soft: #0BA5EC33; + --color-components-icon-bg-blue-soft: #155AEF33; + --color-components-icon-bg-indigo-soft: #6172F333; + --color-components-icon-bg-violet-soft: #875BF733; + --color-components-icon-bg-midnight-soft: #828DAD33; + --color-components-icon-bg-red-soft: #F0443833; + --color-components-icon-bg-orange-solid: #F79009; + --color-components-icon-bg-orange-soft: #F7900933; --color-text-primary: #FBFBFC; --color-text-secondary: #D9D9DE; @@ -302,6 +410,7 @@ html[data-theme="dark"] { --color-background-overlay-alt: #18181B66; --color-background-surface-white: #FFFFFFE5; --color-background-overlay-destructive: #F044384D; + --color-background-overlay-backdrop: #18181BF2; --color-shadow-shadow-1: #0000000D; --color-shadow-shadow-3: #0000001A; @@ -317,14 +426,20 @@ html[data-theme="dark"] { --color-workflow-block-border: #FFFFFF14; --color-workflow-block-parma-bg: #FFFFFF0D; --color-workflow-block-bg: #27272B; + --color-workflow-block-bg-transparent: #27272BF5; --color-workflow-block-border-highlight: #C8CEDA33; --color-workflow-canvas-workflow-dot-color: #8585AD26; --color-workflow-canvas-workflow-bg: #1D1D20; - --color-workflow-link-line-active: #296DFF; + --color-workflow-link-line-active: #5289FF; --color-workflow-link-line-normal: #676F83; - --color-workflow-link-line-handle: #296DFF; + --color-workflow-link-line-handle: #5289FF; + --color-workflow-link-line-normal-transparent: #676F8333; + --color-workflow-link-line-failure-active: #FDB022; + --color-workflow-link-line-failure-handle: #FDB022; + --color-workflow-link-line-failure-button-bg: #F79009; + --color-workflow-link-line-failure-button-hover: #DC6803; --color-workflow-link-line-success-active: #47CD89; --color-workflow-link-line-success-handle: #47CD89; @@ -341,8 +456,8 @@ html[data-theme="dark"] { --color-workflow-display-success-vignette-color: #17B26A40; --color-workflow-display-success-bg-line-pattern: #18181BCC; - --color-workflow-display-glass-1: #FFFFFF03; - --color-workflow-display-glass-2: #FFFFFF08; + --color-workflow-display-glass-1: #FFFFFF08; + --color-workflow-display-glass-2: #FFFFFF0D; --color-workflow-display-vignette-dark: #00000066; --color-workflow-display-highlight: #FFFFFF1F; 
   --color-workflow-display-outline: #18181BF2;
@@ -431,6 +546,7 @@ html[data-theme="dark"] {
   --color-util-colors-orange-orange-500: #EF6820;
   --color-util-colors-orange-orange-600: #F38744;
   --color-util-colors-orange-orange-700: #F7B27A;
+  --color-util-colors-orange-orange-100-transparent: #77291700;

   --color-util-colors-pink-pink-50: #4E0D30;
   --color-util-colors-pink-pink-100: #851651;
@@ -606,4 +722,16 @@ html[data-theme="dark"] {
   --color-third-party-LangChain: #FFFFFF;
   --color-third-party-Langfuse: #FFFFFF;
   --color-third-party-Github: #FFFFFF;
+  --color-third-party-Github-tertiary: #C8CEDA99;
+  --color-third-party-Github-secondary: #D9D9DE;
+  --color-third-party-model-bg-openai: #121212;
+  --color-third-party-model-bg-anthropic: #1D1917;
+  --color-third-party-model-bg-default: #0B0B0E;
+
+  --color-third-party-aws: #141F2E;
+  --color-third-party-aws-alt: #192639;
+
+  --color-saas-background: #0B0B0E;
+  --color-saas-pricing-grid-bg: #C8CEDA33;
+
 }
\ No newline at end of file
diff --git a/web/themes/light.css b/web/themes/light.css
index 3b9c15505c97cc..6fb2a6b00caf0b 100644
--- a/web/themes/light.css
+++ b/web/themes/light.css
@@ -85,8 +85,12 @@ html[data-theme="light"] {
   --color-components-button-secondary-accent-border-hover: #10182824;
   --color-components-button-secondary-accent-border-disabled: #1018280A;

+  --color-components-button-indigo-bg: #444CE7;
+  --color-components-button-indigo-bg-hover: #3538CD;
+  --color-components-button-indigo-bg-disabled: #6172F324;
+
   --color-components-checkbox-icon: #FFFFFF;
-  --color-components-checkbox-icon-disabled: #D0D5DC;
+  --color-components-checkbox-icon-disabled: #FFFFFF80;
   --color-components-checkbox-bg: #155AEF;
   --color-components-checkbox-bg-hover: #004AEB;
   --color-components-checkbox-bg-disabled: #F2F4F7;
@@ -95,10 +99,11 @@ html[data-theme="light"] {
   --color-components-checkbox-border-disabled: #18181B0A;
   --color-components-checkbox-bg-unchecked: #FFFFFF;
   --color-components-checkbox-bg-unchecked-hover: #FFFFFF;
+  --color-components-checkbox-bg-disabled-checked: #B2CAFF;

   --color-components-radio-border-checked: #155AEF;
   --color-components-radio-border-checked-hover: #004AEB;
-  --color-components-radio-border-checked-disabled: #F2F4F7;
+  --color-components-radio-border-checked-disabled: #B2CAFF;
   --color-components-radio-bg-disabled: #FFFFFF00;
   --color-components-radio-border: #D0D5DC;
   --color-components-radio-border-hover: #98A2B2;
@@ -135,6 +140,9 @@ html[data-theme="light"] {
   --color-components-panel-on-panel-item-bg: #FFFFFF;
   --color-components-panel-on-panel-item-bg-hover: #F9FAFB;
   --color-components-panel-on-panel-item-bg-alt: #F9FAFB;
+  --color-components-panel-on-panel-item-bg-transparent: #FFFFFFF2;
+  --color-components-panel-on-panel-item-bg-hover-transparent: #F9FAFB00;
+  --color-components-panel-on-panel-item-bg-destructive-hover-transparent: #FEF3F200;

   --color-components-panel-bg-transparent: #FFFFFF00;
@@ -161,10 +169,10 @@ html[data-theme="light"] {
   --color-components-segmented-control-item-active-accent-bg: #FFFFFF;
   --color-components-segmented-control-item-active-accent-border: #FFFFFF;

-  --color-components-option-card-option-bg: #F9FAFB;
+  --color-components-option-card-option-bg: #FCFCFD;
   --color-components-option-card-option-selected-bg: #FFFFFF;
   --color-components-option-card-option-selected-border: #296DFF;
-  --color-components-option-card-option-border: #F2F4F7;
+  --color-components-option-card-option-border: #E9EBF0;
   --color-components-option-card-option-bg-hover: #FFFFFF;
   --color-components-option-card-option-border-hover: #D0D5DC;
@@ -208,10 +216,12 @@ html[data-theme="light"] {
   --color-components-actionbar-bg: #FFFFFFF2;
   --color-components-actionbar-border: #1018280A;
+  --color-components-actionbar-bg-accent: #F5F7FF;
+  --color-components-actionbar-border-accent: #B2CAFF;

   --color-components-dropzone-bg-alt: #F2F4F7;
   --color-components-dropzone-bg: #F9FAFB;
-  --color-components-dropzone-bg-accent: #EFF4FF;
+  --color-components-dropzone-bg-accent: #155AEF24;
   --color-components-dropzone-border: #10182814;
   --color-components-dropzone-border-alt: #10182833;
   --color-components-dropzone-border-accent: #84ABFF;
@@ -228,6 +238,14 @@ html[data-theme="light"] {
   --color-components-progress-gray-border: #98A2B2;
   --color-components-progress-gray-bg: #C8CEDA05;

+  --color-components-progress-warning-progress: #F79009;
+  --color-components-progress-warning-border: #F79009;
+  --color-components-progress-warning-bg: #F790090A;
+
+  --color-components-progress-error-progress: #F04438;
+  --color-components-progress-error-border: #F04438;
+  --color-components-progress-error-bg: #F044380A;
+
   --color-components-chat-input-audio-bg: #EFF4FF;
   --color-components-chat-input-audio-wave-default: #155AEF33;
   --color-components-chat-input-bg-mask-1: #FFFFFF03;
@@ -236,13 +254,103 @@ html[data-theme="light"] {
   --color-components-chat-input-audio-wave-active: #296DFF;
   --color-components-chat-input-audio-bg-alt: #FCFCFD;

-  --color-components-Avatar-shape-fill-stop-0: #FFFFFF;
-  --color-components-Avatar-shape-fill-stop-100: #FFFFFFE5;
-
-  --color-components-Avatar-bg-mask-stop-0: #FFFFFF1F;
-  --color-components-Avatar-bg-mask-stop-100: #FFFFFF14;
-
-  --color-components-Avatar-default-avatar-bg: #D0D5DC;
+  --color-components-avatar-shape-fill-stop-0: #FFFFFF;
+  --color-components-avatar-shape-fill-stop-100: #FFFFFFE5;
+
+  --color-components-avatar-bg-mask-stop-0: #FFFFFF1F;
+  --color-components-avatar-bg-mask-stop-100: #FFFFFF14;
+
+  --color-components-avatar-default-avatar-bg: #D0D5DC;
+  --color-components-avatar-mask-darkmode-dimmed: #FFFFFF00;
+
+  --color-components-label-gray: #F2F4F7;
+
+  --color-components-premium-badge-blue-bg-stop-0: #5289FF;
+  --color-components-premium-badge-blue-bg-stop-100: #155AEF;
+  --color-components-premium-badge-blue-stroke-stop-0: #FFFFFFF2;
+  --color-components-premium-badge-blue-stroke-stop-100: #155AEF;
+  --color-components-premium-badge-blue-text-stop-0: #F5F7FF;
+  --color-components-premium-badge-blue-text-stop-100: #D1E0FF;
+  --color-components-premium-badge-blue-glow: #00329E;
+  --color-components-premium-badge-blue-bg-stop-0-hover: #296DFF;
+  --color-components-premium-badge-blue-bg-stop-100-hover: #004AEB;
+  --color-components-premium-badge-blue-glow-hover: #84ABFF;
+  --color-components-premium-badge-blue-stroke-stop-0-hover: #FFFFFFF2;
+  --color-components-premium-badge-blue-stroke-stop-100-hover: #00329E;
+
+  --color-components-premium-badge-highlight-stop-0: #FFFFFF1F;
+  --color-components-premium-badge-highlight-stop-100: #FFFFFF4D;
+  --color-components-premium-badge-indigo-bg-stop-0: #8098F9;
+  --color-components-premium-badge-indigo-bg-stop-100: #444CE7;
+  --color-components-premium-badge-indigo-stroke-stop-0: #FFFFFFF2;
+  --color-components-premium-badge-indigo-stroke-stop-100: #6172F3;
+  --color-components-premium-badge-indigo-text-stop-0: #F5F8FF;
+  --color-components-premium-badge-indigo-text-stop-100: #E0EAFF;
+  --color-components-premium-badge-indigo-glow: #2D3282;
+  --color-components-premium-badge-indigo-glow-hover: #A4BCFD;
+  --color-components-premium-badge-indigo-bg-stop-0-hover: #6172F3;
+  --color-components-premium-badge-indigo-bg-stop-100-hover: #2D31A6;
+  --color-components-premium-badge-indigo-stroke-stop-0-hover: #FFFFFFF2;
+  --color-components-premium-badge-indigo-stroke-stop-100-hover: #2D31A6;
+
+  --color-components-premium-badge-grey-bg-stop-0: #98A2B2;
+  --color-components-premium-badge-grey-bg-stop-100: #676F83;
+  --color-components-premium-badge-grey-stroke-stop-0: #FFFFFFF2;
+  --color-components-premium-badge-grey-stroke-stop-100: #676F83;
+  --color-components-premium-badge-grey-text-stop-0: #FCFCFD;
+  --color-components-premium-badge-grey-text-stop-100: #F2F4F7;
+  --color-components-premium-badge-grey-glow: #101828;
+  --color-components-premium-badge-grey-glow-hover: #D0D5DC;
+  --color-components-premium-badge-grey-bg-stop-0-hover: #676F83;
+  --color-components-premium-badge-grey-bg-stop-100-hover: #354052;
+  --color-components-premium-badge-grey-stroke-stop-0-hover: #FFFFFFF2;
+  --color-components-premium-badge-grey-stroke-stop-100-hover: #354052;
+
+  --color-components-premium-badge-orange-bg-stop-0: #FF692E;
+  --color-components-premium-badge-orange-bg-stop-100: #E04F16;
+  --color-components-premium-badge-orange-stroke-stop-0: #FFFFFFF2;
+  --color-components-premium-badge-orange-stroke-stop-100: #E62E05;
+  --color-components-premium-badge-orange-text-stop-0: #FEFAF5;
+  --color-components-premium-badge-orange-text-stop-100: #FDEAD7;
+  --color-components-premium-badge-orange-glow: #772917;
+  --color-components-premium-badge-orange-glow-hover: #F7B27A;
+  --color-components-premium-badge-orange-bg-stop-0-hover: #FF4405;
+  --color-components-premium-badge-orange-bg-stop-100-hover: #B93815;
+  --color-components-premium-badge-orange-stroke-stop-0-hover: #FFFFFFF2;
+  --color-components-premium-badge-orange-stroke-stop-100-hover: #BC1B06;
+
+  --color-components-progress-bar-bg: #155AEF0A;
+  --color-components-progress-bar-progress: #155AEF24;
+  --color-components-progress-bar-border: #1018280A;
+  --color-components-progress-bar-progress-solid: #296DFF;
+  --color-components-progress-bar-progress-highlight: #155AEF33;
+
+  --color-components-icon-bg-red-solid: #D92D20;
+  --color-components-icon-bg-rose-solid: #E31B54;
+  --color-components-icon-bg-pink-solid: #DD2590;
+  --color-components-icon-bg-orange-dark-solid: #FF4405;
+  --color-components-icon-bg-yellow-solid: #EAAA08;
+  --color-components-icon-bg-green-solid: #4CA30D;
+  --color-components-icon-bg-teal-solid: #0E9384;
+  --color-components-icon-bg-blue-light-solid: #0BA5EC;
+  --color-components-icon-bg-blue-solid: #155AEF;
+  --color-components-icon-bg-indigo-solid: #444CE7;
+  --color-components-icon-bg-violet-solid: #7839EE;
+  --color-components-icon-bg-midnight-solid: #828DAD;
+  --color-components-icon-bg-rose-soft: #FFF1F3;
+  --color-components-icon-bg-pink-soft: #FDF2FA;
+  --color-components-icon-bg-orange-dark-soft: #FFF4ED;
+  --color-components-icon-bg-yellow-soft: #FEFBE8;
+  --color-components-icon-bg-green-soft: #F3FEE7;
+  --color-components-icon-bg-teal-soft: #F0FDF9;
+  --color-components-icon-bg-blue-light-soft: #F0F9FF;
+  --color-components-icon-bg-blue-soft: #EFF4FF;
+  --color-components-icon-bg-indigo-soft: #EEF4FF;
+  --color-components-icon-bg-violet-soft: #F5F3FF;
+  --color-components-icon-bg-midnight-soft: #F0F2F5;
+  --color-components-icon-bg-red-soft: #FEF3F2;
+  --color-components-icon-bg-orange-solid: #F79009;
+  --color-components-icon-bg-orange-soft: #FFFAEB;

   --color-text-primary: #101828;
   --color-text-secondary: #354052;
@@ -302,6 +410,7 @@ html[data-theme="light"] {
   --color-background-overlay-alt: #10182866;
   --color-background-surface-white: #FFFFFFF2;
   --color-background-overlay-destructive: #F044384D;
+  --color-background-overlay-backdrop: #F2F4F7F2;

   --color-shadow-shadow-1: #09090B08;
   --color-shadow-shadow-3: #09090B0D;
@@ -317,6 +426,7 @@ html[data-theme="light"] {
   --color-workflow-block-border: #FFFFFF;
   --color-workflow-block-parma-bg: #F2F4F7;
   --color-workflow-block-bg: #FCFCFD;
+  --color-workflow-block-bg-transparent: #FCFCFDE5;
   --color-workflow-block-border-highlight: #155AEF24;

   --color-workflow-canvas-workflow-dot-color: #8585AD26;
@@ -436,6 +546,7 @@ html[data-theme="light"] {
   --color-util-colors-orange-orange-500: #EF6820;
   --color-util-colors-orange-orange-600: #E04F16;
   --color-util-colors-orange-orange-700: #B93815;
+  --color-util-colors-orange-orange-100-transparent: #FDEAD700;

   --color-util-colors-pink-pink-50: #FDF2FA;
   --color-util-colors-pink-pink-100: #FCE7F6;
@@ -610,6 +721,17 @@ html[data-theme="light"] {
   --color-third-party-LangChain: #1C3C3C;
   --color-third-party-Langfuse: #000000;
-  --color-third-party-Github: #1B1F24;
+  --color-third-party-Github-tertiary: #1B1F24;
+  --color-third-party-Github-secondary: #1B1F24;
+  --color-third-party-model-bg-openai: #E3E5E8;
+  --color-third-party-model-bg-anthropic: #EEEDE7;
+  --color-third-party-model-bg-default: #F9FAFB;
+
+  --color-third-party-aws: #141F2E;
+  --color-third-party-aws-alt: #0F1824;
+
+  --color-saas-background: #FCFCFD;
+  --color-saas-pricing-grid-bg: #C8CEDA80;
+
 }
\ No newline at end of file
diff --git a/web/themes/tailwind-theme-var-define.ts b/web/themes/tailwind-theme-var-define.ts
index a81e224e9dd420..6329ce3d26e11d 100644
--- a/web/themes/tailwind-theme-var-define.ts
+++ b/web/themes/tailwind-theme-var-define.ts
@@ -85,6 +85,10 @@ const vars = {
   'components-button-secondary-accent-border-hover': 'var(--color-components-button-secondary-accent-border-hover)',
   'components-button-secondary-accent-border-disabled': 'var(--color-components-button-secondary-accent-border-disabled)',

+  'components-button-indigo-bg': 'var(--color-components-button-indigo-bg)',
+  'components-button-indigo-bg-hover': 'var(--color-components-button-indigo-bg-hover)',
+  'components-button-indigo-bg-disabled': 'var(--color-components-button-indigo-bg-disabled)',
+
   'components-checkbox-icon': 'var(--color-components-checkbox-icon)',
   'components-checkbox-icon-disabled': 'var(--color-components-checkbox-icon-disabled)',
   'components-checkbox-bg': 'var(--color-components-checkbox-bg)',
@@ -95,6 +99,7 @@ const vars = {
   'components-checkbox-border-disabled': 'var(--color-components-checkbox-border-disabled)',
   'components-checkbox-bg-unchecked': 'var(--color-components-checkbox-bg-unchecked)',
   'components-checkbox-bg-unchecked-hover': 'var(--color-components-checkbox-bg-unchecked-hover)',
+  'components-checkbox-bg-disabled-checked': 'var(--color-components-checkbox-bg-disabled-checked)',

   'components-radio-border-checked': 'var(--color-components-radio-border-checked)',
   'components-radio-border-checked-hover': 'var(--color-components-radio-border-checked-hover)',
@@ -135,6 +140,9 @@ const vars = {
   'components-panel-on-panel-item-bg': 'var(--color-components-panel-on-panel-item-bg)',
   'components-panel-on-panel-item-bg-hover': 'var(--color-components-panel-on-panel-item-bg-hover)',
   'components-panel-on-panel-item-bg-alt': 'var(--color-components-panel-on-panel-item-bg-alt)',
+  'components-panel-on-panel-item-bg-transparent': 'var(--color-components-panel-on-panel-item-bg-transparent)',
+  'components-panel-on-panel-item-bg-hover-transparent': 'var(--color-components-panel-on-panel-item-bg-hover-transparent)',
+  'components-panel-on-panel-item-bg-destructive-hover-transparent': 'var(--color-components-panel-on-panel-item-bg-destructive-hover-transparent)',

   'components-panel-bg-transparent': 'var(--color-components-panel-bg-transparent)',
@@ -208,6 +216,8 @@ const vars = {
   'components-actionbar-bg': 'var(--color-components-actionbar-bg)',
   'components-actionbar-border': 'var(--color-components-actionbar-border)',
+  'components-actionbar-bg-accent': 'var(--color-components-actionbar-bg-accent)',
+  'components-actionbar-border-accent': 'var(--color-components-actionbar-border-accent)',

   'components-dropzone-bg-alt': 'var(--color-components-dropzone-bg-alt)',
   'components-dropzone-bg': 'var(--color-components-dropzone-bg)',
@@ -228,6 +238,14 @@ const vars = {
   'components-progress-gray-border': 'var(--color-components-progress-gray-border)',
   'components-progress-gray-bg': 'var(--color-components-progress-gray-bg)',

+  'components-progress-warning-progress': 'var(--color-components-progress-warning-progress)',
+  'components-progress-warning-border': 'var(--color-components-progress-warning-border)',
+  'components-progress-warning-bg': 'var(--color-components-progress-warning-bg)',
+
+  'components-progress-error-progress': 'var(--color-components-progress-error-progress)',
+  'components-progress-error-border': 'var(--color-components-progress-error-border)',
+  'components-progress-error-bg': 'var(--color-components-progress-error-bg)',
+
   'components-chat-input-audio-bg': 'var(--color-components-chat-input-audio-bg)',
   'components-chat-input-audio-wave-default': 'var(--color-components-chat-input-audio-wave-default)',
   'components-chat-input-bg-mask-1': 'var(--color-components-chat-input-bg-mask-1)',
@@ -236,13 +254,103 @@ const vars = {
   'components-chat-input-audio-wave-active': 'var(--color-components-chat-input-audio-wave-active)',
   'components-chat-input-audio-bg-alt': 'var(--color-components-chat-input-audio-bg-alt)',

-  'components-Avatar-shape-fill-stop-0': 'var(--color-components-Avatar-shape-fill-stop-0)',
-  'components-Avatar-shape-fill-stop-100': 'var(--color-components-Avatar-shape-fill-stop-100)',
-
-  'components-Avatar-bg-mask-stop-0': 'var(--color-components-Avatar-bg-mask-stop-0)',
-  'components-Avatar-bg-mask-stop-100': 'var(--color-components-Avatar-bg-mask-stop-100)',
-
-  'components-Avatar-default-avatar-bg': 'var(--color-components-Avatar-default-avatar-bg)',
+  'components-avatar-shape-fill-stop-0': 'var(--color-components-avatar-shape-fill-stop-0)',
+  'components-avatar-shape-fill-stop-100': 'var(--color-components-avatar-shape-fill-stop-100)',
+
+  'components-avatar-bg-mask-stop-0': 'var(--color-components-avatar-bg-mask-stop-0)',
+  'components-avatar-bg-mask-stop-100': 'var(--color-components-avatar-bg-mask-stop-100)',
+
+  'components-avatar-default-avatar-bg': 'var(--color-components-avatar-default-avatar-bg)',
+  'components-avatar-mask-darkmode-dimmed': 'var(--color-components-avatar-mask-darkmode-dimmed)',
+
+  'components-label-gray': 'var(--color-components-label-gray)',
+
+  'components-premium-badge-blue-bg-stop-0': 'var(--color-components-premium-badge-blue-bg-stop-0)',
+  'components-premium-badge-blue-bg-stop-100': 'var(--color-components-premium-badge-blue-bg-stop-100)',
+  'components-premium-badge-blue-stroke-stop-0': 'var(--color-components-premium-badge-blue-stroke-stop-0)',
+  'components-premium-badge-blue-stroke-stop-100': 'var(--color-components-premium-badge-blue-stroke-stop-100)',
+  'components-premium-badge-blue-text-stop-0': 'var(--color-components-premium-badge-blue-text-stop-0)',
+  'components-premium-badge-blue-text-stop-100': 'var(--color-components-premium-badge-blue-text-stop-100)',
+  'components-premium-badge-blue-glow': 'var(--color-components-premium-badge-blue-glow)',
+  'components-premium-badge-blue-bg-stop-0-hover': 'var(--color-components-premium-badge-blue-bg-stop-0-hover)',
+  'components-premium-badge-blue-bg-stop-100-hover': 'var(--color-components-premium-badge-blue-bg-stop-100-hover)',
+  'components-premium-badge-blue-glow-hover': 'var(--color-components-premium-badge-blue-glow-hover)',
+  'components-premium-badge-blue-stroke-stop-0-hover': 'var(--color-components-premium-badge-blue-stroke-stop-0-hover)',
+  'components-premium-badge-blue-stroke-stop-100-hover': 'var(--color-components-premium-badge-blue-stroke-stop-100-hover)',
+
+  'components-premium-badge-highlight-stop-0': 'var(--color-components-premium-badge-highlight-stop-0)',
+  'components-premium-badge-highlight-stop-100': 'var(--color-components-premium-badge-highlight-stop-100)',
+  'components-premium-badge-indigo-bg-stop-0': 'var(--color-components-premium-badge-indigo-bg-stop-0)',
+  'components-premium-badge-indigo-bg-stop-100': 'var(--color-components-premium-badge-indigo-bg-stop-100)',
+  'components-premium-badge-indigo-stroke-stop-0': 'var(--color-components-premium-badge-indigo-stroke-stop-0)',
+  'components-premium-badge-indigo-stroke-stop-100': 'var(--color-components-premium-badge-indigo-stroke-stop-100)',
+  'components-premium-badge-indigo-text-stop-0': 'var(--color-components-premium-badge-indigo-text-stop-0)',
+  'components-premium-badge-indigo-text-stop-100': 'var(--color-components-premium-badge-indigo-text-stop-100)',
+  'components-premium-badge-indigo-glow': 'var(--color-components-premium-badge-indigo-glow)',
+  'components-premium-badge-indigo-glow-hover': 'var(--color-components-premium-badge-indigo-glow-hover)',
+  'components-premium-badge-indigo-bg-stop-0-hover': 'var(--color-components-premium-badge-indigo-bg-stop-0-hover)',
+  'components-premium-badge-indigo-bg-stop-100-hover': 'var(--color-components-premium-badge-indigo-bg-stop-100-hover)',
+  'components-premium-badge-indigo-stroke-stop-0-hover': 'var(--color-components-premium-badge-indigo-stroke-stop-0-hover)',
+  'components-premium-badge-indigo-stroke-stop-100-hover': 'var(--color-components-premium-badge-indigo-stroke-stop-100-hover)',
+
+  'components-premium-badge-grey-bg-stop-0': 'var(--color-components-premium-badge-grey-bg-stop-0)',
+  'components-premium-badge-grey-bg-stop-100': 'var(--color-components-premium-badge-grey-bg-stop-100)',
+  'components-premium-badge-grey-stroke-stop-0': 'var(--color-components-premium-badge-grey-stroke-stop-0)',
+  'components-premium-badge-grey-stroke-stop-100': 'var(--color-components-premium-badge-grey-stroke-stop-100)',
+  'components-premium-badge-grey-text-stop-0': 'var(--color-components-premium-badge-grey-text-stop-0)',
+  'components-premium-badge-grey-text-stop-100': 'var(--color-components-premium-badge-grey-text-stop-100)',
+  'components-premium-badge-grey-glow': 'var(--color-components-premium-badge-grey-glow)',
+  'components-premium-badge-grey-glow-hover': 'var(--color-components-premium-badge-grey-glow-hover)',
+  'components-premium-badge-grey-bg-stop-0-hover': 'var(--color-components-premium-badge-grey-bg-stop-0-hover)',
+  'components-premium-badge-grey-bg-stop-100-hover': 'var(--color-components-premium-badge-grey-bg-stop-100-hover)',
+  'components-premium-badge-grey-stroke-stop-0-hover': 'var(--color-components-premium-badge-grey-stroke-stop-0-hover)',
+  'components-premium-badge-grey-stroke-stop-100-hover': 'var(--color-components-premium-badge-grey-stroke-stop-100-hover)',
+
+  'components-premium-badge-orange-bg-stop-0': 'var(--color-components-premium-badge-orange-bg-stop-0)',
+  'components-premium-badge-orange-bg-stop-100': 'var(--color-components-premium-badge-orange-bg-stop-100)',
+  'components-premium-badge-orange-stroke-stop-0': 'var(--color-components-premium-badge-orange-stroke-stop-0)',
+  'components-premium-badge-orange-stroke-stop-100': 'var(--color-components-premium-badge-orange-stroke-stop-100)',
+  'components-premium-badge-orange-text-stop-0': 'var(--color-components-premium-badge-orange-text-stop-0)',
+  'components-premium-badge-orange-text-stop-100': 'var(--color-components-premium-badge-orange-text-stop-100)',
+  'components-premium-badge-orange-glow': 'var(--color-components-premium-badge-orange-glow)',
+  'components-premium-badge-orange-glow-hover': 'var(--color-components-premium-badge-orange-glow-hover)',
+  'components-premium-badge-orange-bg-stop-0-hover': 'var(--color-components-premium-badge-orange-bg-stop-0-hover)',
+  'components-premium-badge-orange-bg-stop-100-hover': 'var(--color-components-premium-badge-orange-bg-stop-100-hover)',
+  'components-premium-badge-orange-stroke-stop-0-hover': 'var(--color-components-premium-badge-orange-stroke-stop-0-hover)',
+  'components-premium-badge-orange-stroke-stop-100-hover': 'var(--color-components-premium-badge-orange-stroke-stop-100-hover)',
+
+  'components-progress-bar-bg': 'var(--color-components-progress-bar-bg)',
+  'components-progress-bar-progress': 'var(--color-components-progress-bar-progress)',
+  'components-progress-bar-border': 'var(--color-components-progress-bar-border)',
+  'components-progress-bar-progress-solid': 'var(--color-components-progress-bar-progress-solid)',
+  'components-progress-bar-progress-highlight': 'var(--color-components-progress-bar-progress-highlight)',
+
+  'components-icon-bg-red-solid': 'var(--color-components-icon-bg-red-solid)',
+  'components-icon-bg-rose-solid': 'var(--color-components-icon-bg-rose-solid)',
+  'components-icon-bg-pink-solid': 'var(--color-components-icon-bg-pink-solid)',
+  'components-icon-bg-orange-dark-solid': 'var(--color-components-icon-bg-orange-dark-solid)',
+  'components-icon-bg-yellow-solid': 'var(--color-components-icon-bg-yellow-solid)',
+  'components-icon-bg-green-solid': 'var(--color-components-icon-bg-green-solid)',
+  'components-icon-bg-teal-solid': 'var(--color-components-icon-bg-teal-solid)',
+  'components-icon-bg-blue-light-solid': 'var(--color-components-icon-bg-blue-light-solid)',
+  'components-icon-bg-blue-solid': 'var(--color-components-icon-bg-blue-solid)',
+  'components-icon-bg-indigo-solid': 'var(--color-components-icon-bg-indigo-solid)',
+  'components-icon-bg-violet-solid': 'var(--color-components-icon-bg-violet-solid)',
+  'components-icon-bg-midnight-solid': 'var(--color-components-icon-bg-midnight-solid)',
+  'components-icon-bg-rose-soft': 'var(--color-components-icon-bg-rose-soft)',
+  'components-icon-bg-pink-soft': 'var(--color-components-icon-bg-pink-soft)',
+  'components-icon-bg-orange-dark-soft': 'var(--color-components-icon-bg-orange-dark-soft)',
+  'components-icon-bg-yellow-soft': 'var(--color-components-icon-bg-yellow-soft)',
+  'components-icon-bg-green-soft': 'var(--color-components-icon-bg-green-soft)',
+  'components-icon-bg-teal-soft': 'var(--color-components-icon-bg-teal-soft)',
+  'components-icon-bg-blue-light-soft': 'var(--color-components-icon-bg-blue-light-soft)',
+  'components-icon-bg-blue-soft': 'var(--color-components-icon-bg-blue-soft)',
+  'components-icon-bg-indigo-soft': 'var(--color-components-icon-bg-indigo-soft)',
+  'components-icon-bg-violet-soft': 'var(--color-components-icon-bg-violet-soft)',
+  'components-icon-bg-midnight-soft': 'var(--color-components-icon-bg-midnight-soft)',
+  'components-icon-bg-red-soft': 'var(--color-components-icon-bg-red-soft)',
+  'components-icon-bg-orange-solid': 'var(--color-components-icon-bg-orange-solid)',
+  'components-icon-bg-orange-soft': 'var(--color-components-icon-bg-orange-soft)',

   'text-primary': 'var(--color-text-primary)',
   'text-secondary': 'var(--color-text-secondary)',
@@ -302,6 +410,7 @@ const vars = {
   'background-overlay-alt': 'var(--color-background-overlay-alt)',
   'background-surface-white': 'var(--color-background-surface-white)',
   'background-overlay-destructive': 'var(--color-background-overlay-destructive)',
+  'background-overlay-backdrop': 'var(--color-background-overlay-backdrop)',

   'shadow-shadow-1': 'var(--color-shadow-shadow-1)',
   'shadow-shadow-3': 'var(--color-shadow-shadow-3)',
@@ -317,6 +426,7 @@ const vars = {
   'workflow-block-border': 'var(--color-workflow-block-border)',
   'workflow-block-parma-bg': 'var(--color-workflow-block-parma-bg)',
   'workflow-block-bg': 'var(--color-workflow-block-bg)',
+  'workflow-block-bg-transparent': 'var(--color-workflow-block-bg-transparent)',
   'workflow-block-border-highlight': 'var(--color-workflow-block-border-highlight)',

   'workflow-canvas-workflow-dot-color': 'var(--color-workflow-canvas-workflow-dot-color)',
@@ -436,6 +546,7 @@ const vars = {
   'util-colors-orange-orange-500': 'var(--color-util-colors-orange-orange-500)',
   'util-colors-orange-orange-600': 'var(--color-util-colors-orange-orange-600)',
   'util-colors-orange-orange-700': 'var(--color-util-colors-orange-orange-700)',
+  'util-colors-orange-orange-100-transparent': 'var(--color-util-colors-orange-orange-100-transparent)',

   'util-colors-pink-pink-50': 'var(--color-util-colors-pink-pink-50)',
   'util-colors-pink-pink-100': 'var(--color-util-colors-pink-pink-100)',
@@ -611,6 +722,17 @@ const vars = {
   'third-party-LangChain': 'var(--color-third-party-LangChain)',
   'third-party-Langfuse': 'var(--color-third-party-Langfuse)',
   'third-party-Github': 'var(--color-third-party-Github)',
-}
+  'third-party-Github-tertiary': 'var(--color-third-party-Github-tertiary)',
+  'third-party-Github-secondary': 'var(--color-third-party-Github-secondary)',
+  'third-party-model-bg-openai': 'var(--color-third-party-model-bg-openai)',
+  'third-party-model-bg-anthropic': 'var(--color-third-party-model-bg-anthropic)',
+  'third-party-model-bg-default': 'var(--color-third-party-model-bg-default)',
+
+  'third-party-aws': 'var(--color-third-party-aws)',
+  'third-party-aws-alt': 'var(--color-third-party-aws-alt)',
+  'saas-background': 'var(--color-saas-background)',
+  'saas-pricing-grid-bg': 'var(--color-saas-pricing-grid-bg)',
+
+}
 export default vars
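
For context on how the token map above is consumed: `vars` only holds `var(--color-...)` references, while the concrete values live in `light.css` / `dark.css` and are selected by the `html[data-theme=...]` attribute. Below is a minimal sketch of the typical wiring into Tailwind; the config path, `content` glob, and surrounding options are assumptions for illustration and are not part of this diff.

import type { Config } from 'tailwindcss'
import vars from './themes/tailwind-theme-var-define'

const config: Config = {
  content: ['./app/**/*.{ts,tsx}'], // assumed glob, not from this diff
  theme: {
    extend: {
      // Each key maps a semantic token (e.g. 'components-progress-bar-bg')
      // to `var(--color-...)`, producing utilities like
      // `bg-components-progress-bar-bg` that resolve per theme at runtime.
      colors: vars,
    },
  },
}

export default config

Because every color is resolved through a CSS custom property rather than a baked-in hex value, flipping `data-theme` between "light" and "dark" re-colors all generated utilities at once, with no Tailwind rebuild.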