From 48f872a68c061298007c5fd604c4bfa560aa99ba Mon Sep 17 00:00:00 2001
From: crazywoola <100913391+crazywoola@users.noreply.github.com>
Date: Fri, 19 Jul 2024 18:37:42 +0800
Subject: [PATCH 1/4] fix: build error (#6480)
---
web/app/(commonLayout)/tools/page.tsx | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/web/app/(commonLayout)/tools/page.tsx b/web/app/(commonLayout)/tools/page.tsx
index 4e64d8c0dfe8d3..1b08d54ba3e424 100644
--- a/web/app/(commonLayout)/tools/page.tsx
+++ b/web/app/(commonLayout)/tools/page.tsx
@@ -12,15 +12,16 @@ const Layout: FC = () => {
const { isCurrentWorkspaceDatasetOperator } = useAppContext()
useEffect(() => {
- document.title = `${t('tools.title')} - Dify`
+ if (typeof window !== 'undefined')
+ document.title = `${t('tools.title')} - Dify`
if (isCurrentWorkspaceDatasetOperator)
return router.replace('/datasets')
- }, [])
+ }, [isCurrentWorkspaceDatasetOperator, router, t])
useEffect(() => {
if (isCurrentWorkspaceDatasetOperator)
return router.replace('/datasets')
- }, [isCurrentWorkspaceDatasetOperator])
+ }, [isCurrentWorkspaceDatasetOperator, router])
return
}
From c013086e64033366d1c7baa84498c6ecfdf14965 Mon Sep 17 00:00:00 2001
From: Even
Date: Fri, 19 Jul 2024 20:26:11 +0800
Subject: [PATCH 2/4] fix: next suggest question logic problem (#6451)
Co-authored-by: evenyan
---
api/core/llm_generator/prompts.py | 1 +
api/core/memory/token_buffer_memory.py | 2 +-
2 files changed, 2 insertions(+), 1 deletion(-)
diff --git a/api/core/llm_generator/prompts.py b/api/core/llm_generator/prompts.py
index 170a28432bc0ef..a1737f00c61624 100644
--- a/api/core/llm_generator/prompts.py
+++ b/api/core/llm_generator/prompts.py
@@ -64,6 +64,7 @@
SUGGESTED_QUESTIONS_AFTER_ANSWER_INSTRUCTION_PROMPT = (
"Please help me predict the three most likely questions that human would ask, "
"and keeping each question under 20 characters.\n"
+ "MAKE SURE your output is the SAME language as the Assistant's latest response(if the main response is written in Chinese, then the language of your output must be using Chinese.)!\n"
"The output must be an array in JSON format following the specified schema:\n"
"[\"question1\",\"question2\",\"question3\"]\n"
)
diff --git a/api/core/memory/token_buffer_memory.py b/api/core/memory/token_buffer_memory.py
index 21f1965e93adac..b33d4dd7cb342c 100644
--- a/api/core/memory/token_buffer_memory.py
+++ b/api/core/memory/token_buffer_memory.py
@@ -103,7 +103,7 @@ def get_history_prompt_messages(self, max_token_limit: int = 2000,
if curr_message_tokens > max_token_limit:
pruned_memory = []
- while curr_message_tokens > max_token_limit and prompt_messages:
+ while curr_message_tokens > max_token_limit and len(prompt_messages)>1:
pruned_memory.append(prompt_messages.pop(0))
curr_message_tokens = self.model_instance.get_llm_num_tokens(
prompt_messages
From 49ef9ef225d215454459e149ca0eff59988a69c0 Mon Sep 17 00:00:00 2001
From: Matri
Date: Fri, 19 Jul 2024 20:32:42 +0800
Subject: [PATCH 3/4] feat(tool): getimg.ai integration (#6260)
---
.../builtin/getimgai/_assets/icon.svg | 1 +
.../provider/builtin/getimgai/getimgai.py | 22 +++
.../provider/builtin/getimgai/getimgai.yaml | 29 +++
.../builtin/getimgai/getimgai_appx.py | 59 +++++++
.../builtin/getimgai/tools/text2image.py | 39 ++++
.../builtin/getimgai/tools/text2image.yaml | 167 ++++++++++++++++++
6 files changed, 317 insertions(+)
create mode 100644 api/core/tools/provider/builtin/getimgai/_assets/icon.svg
create mode 100644 api/core/tools/provider/builtin/getimgai/getimgai.py
create mode 100644 api/core/tools/provider/builtin/getimgai/getimgai.yaml
create mode 100644 api/core/tools/provider/builtin/getimgai/getimgai_appx.py
create mode 100644 api/core/tools/provider/builtin/getimgai/tools/text2image.py
create mode 100644 api/core/tools/provider/builtin/getimgai/tools/text2image.yaml
diff --git a/api/core/tools/provider/builtin/getimgai/_assets/icon.svg b/api/core/tools/provider/builtin/getimgai/_assets/icon.svg
new file mode 100644
index 00000000000000..6b2513386da458
--- /dev/null
+++ b/api/core/tools/provider/builtin/getimgai/_assets/icon.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/api/core/tools/provider/builtin/getimgai/getimgai.py b/api/core/tools/provider/builtin/getimgai/getimgai.py
new file mode 100644
index 00000000000000..c81d5fa333cd5d
--- /dev/null
+++ b/api/core/tools/provider/builtin/getimgai/getimgai.py
@@ -0,0 +1,22 @@
+from core.tools.errors import ToolProviderCredentialValidationError
+from core.tools.provider.builtin.getimgai.tools.text2image import Text2ImageTool
+from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController
+
+
+class GetImgAIProvider(BuiltinToolProviderController):
+ def _validate_credentials(self, credentials: dict) -> None:
+ try:
+ # Example validation using the text2image tool
+ Text2ImageTool().fork_tool_runtime(
+ runtime={"credentials": credentials}
+ ).invoke(
+ user_id='',
+ tool_parameters={
+ "prompt": "A fire egg",
+ "response_format": "url",
+ "style": "photorealism",
+ }
+ )
+ except Exception as e:
+ raise ToolProviderCredentialValidationError(str(e))
+
\ No newline at end of file
diff --git a/api/core/tools/provider/builtin/getimgai/getimgai.yaml b/api/core/tools/provider/builtin/getimgai/getimgai.yaml
new file mode 100644
index 00000000000000..c9db0a9e22a6c4
--- /dev/null
+++ b/api/core/tools/provider/builtin/getimgai/getimgai.yaml
@@ -0,0 +1,29 @@
+identity:
+ author: Matri Qi
+ name: getimgai
+ label:
+ en_US: getimg.ai
+ zh_CN: getimg.ai
+ description:
+ en_US: GetImg API integration for image generation and scraping.
+ icon: icon.svg
+ tags:
+ - image
+credentials_for_provider:
+ getimg_api_key:
+ type: secret-input
+ required: true
+ label:
+ en_US: getimg.ai API Key
+ placeholder:
+ en_US: Please input your getimg.ai API key
+ help:
+ en_US: Get your getimg.ai API key from your getimg.ai account settings. If you are using a self-hosted version, you may enter any key at your convenience.
+ url: https://dashboard.getimg.ai/api-keys
+ base_url:
+ type: text-input
+ required: false
+ label:
+ en_US: getimg.ai server's Base URL
+ placeholder:
+ en_US: https://api.getimg.ai/v1
diff --git a/api/core/tools/provider/builtin/getimgai/getimgai_appx.py b/api/core/tools/provider/builtin/getimgai/getimgai_appx.py
new file mode 100644
index 00000000000000..e28c57649cac4c
--- /dev/null
+++ b/api/core/tools/provider/builtin/getimgai/getimgai_appx.py
@@ -0,0 +1,59 @@
+import logging
+import time
+from collections.abc import Mapping
+from typing import Any
+
+import requests
+from requests.exceptions import HTTPError
+
+logger = logging.getLogger(__name__)
+
+class GetImgAIApp:
+ def __init__(self, api_key: str | None = None, base_url: str | None = None):
+ self.api_key = api_key
+ self.base_url = base_url or 'https://api.getimg.ai/v1'
+ if not self.api_key:
+ raise ValueError("API key is required")
+
+ def _prepare_headers(self):
+ headers = {
+ 'Content-Type': 'application/json',
+ 'Authorization': f'Bearer {self.api_key}'
+ }
+ return headers
+
+ def _request(
+ self,
+ method: str,
+ url: str,
+ data: Mapping[str, Any] | None = None,
+ headers: Mapping[str, str] | None = None,
+ retries: int = 3,
+ backoff_factor: float = 0.3,
+ ) -> Mapping[str, Any] | None:
+ for i in range(retries):
+ try:
+ response = requests.request(method, url, json=data, headers=headers)
+ response.raise_for_status()
+ return response.json()
+ except requests.exceptions.RequestException as e:
+ if i < retries - 1 and isinstance(e, HTTPError) and e.response.status_code >= 500:
+ time.sleep(backoff_factor * (2 ** i))
+ else:
+ raise
+ return None
+
+ def text2image(
+ self, mode: str, **kwargs
+ ):
+ data = kwargs['params']
+ if not data.get('prompt'):
+ raise ValueError("Prompt is required")
+
+ endpoint = f'{self.base_url}/{mode}/text-to-image'
+ headers = self._prepare_headers()
+ logger.debug(f"Send request to {endpoint=} body={data}")
+ response = self._request('POST', endpoint, data, headers)
+ if response is None:
+ raise HTTPError("Failed to initiate getimg.ai after multiple retries")
+ return response
diff --git a/api/core/tools/provider/builtin/getimgai/tools/text2image.py b/api/core/tools/provider/builtin/getimgai/tools/text2image.py
new file mode 100644
index 00000000000000..dad7314479a89d
--- /dev/null
+++ b/api/core/tools/provider/builtin/getimgai/tools/text2image.py
@@ -0,0 +1,39 @@
+import json
+from typing import Any, Union
+
+from core.tools.entities.tool_entities import ToolInvokeMessage
+from core.tools.provider.builtin.getimgai.getimgai_appx import GetImgAIApp
+from core.tools.tool.builtin_tool import BuiltinTool
+
+
+class Text2ImageTool(BuiltinTool):
+ def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
+ app = GetImgAIApp(api_key=self.runtime.credentials['getimg_api_key'], base_url=self.runtime.credentials['base_url'])
+
+ options = {
+ 'style': tool_parameters.get('style'),
+ 'prompt': tool_parameters.get('prompt'),
+ 'aspect_ratio': tool_parameters.get('aspect_ratio'),
+ 'output_format': tool_parameters.get('output_format', 'jpeg'),
+ 'response_format': tool_parameters.get('response_format', 'url'),
+ 'width': tool_parameters.get('width'),
+ 'height': tool_parameters.get('height'),
+ 'steps': tool_parameters.get('steps'),
+ 'negative_prompt': tool_parameters.get('negative_prompt'),
+ 'prompt_2': tool_parameters.get('prompt_2'),
+ }
+ options = {k: v for k, v in options.items() if v}
+
+ text2image_result = app.text2image(
+ mode=tool_parameters.get('mode', 'essential-v2'),
+ params=options,
+ wait=True
+ )
+
+ if not isinstance(text2image_result, str):
+ text2image_result = json.dumps(text2image_result, ensure_ascii=False, indent=4)
+
+ if not text2image_result:
+ return self.create_text_message("getimg.ai request failed.")
+
+ return self.create_text_message(text2image_result)
diff --git a/api/core/tools/provider/builtin/getimgai/tools/text2image.yaml b/api/core/tools/provider/builtin/getimgai/tools/text2image.yaml
new file mode 100644
index 00000000000000..d972186f56d6a6
--- /dev/null
+++ b/api/core/tools/provider/builtin/getimgai/tools/text2image.yaml
@@ -0,0 +1,167 @@
+identity:
+ name: text2image
+ author: Matri Qi
+ label:
+ en_US: text2image
+ icon: icon.svg
+description:
+ human:
+ en_US: Generate image via getimg.ai.
+ llm: This tool is used to generate image from prompt or image via https://getimg.ai.
+parameters:
+ - name: prompt
+ type: string
+ required: true
+ label:
+ en_US: prompt
+ human_description:
+      en_US: The text prompt used to generate the image. The getimg.ai service will generate an image based on this prompt.
+ llm_description: this prompt text will be used to generate image.
+ form: llm
+ - name: mode
+ type: select
+ required: false
+ label:
+ en_US: mode
+ human_description:
+ en_US: The getimg.ai mode to use. The mode determines the endpoint used to generate the image.
+ form: form
+ options:
+ - value: "essential-v2"
+ label:
+ en_US: essential-v2
+ - value: stable-diffusion-xl
+ label:
+ en_US: stable-diffusion-xl
+ - value: stable-diffusion
+ label:
+ en_US: stable-diffusion
+ - value: latent-consistency
+ label:
+ en_US: latent-consistency
+ - name: style
+ type: select
+ required: false
+ label:
+ en_US: style
+ human_description:
+ en_US: The style preset to use. The style preset guides the generation towards a particular style. It's just efficient for `Essential V2` mode.
+ form: form
+ options:
+ - value: photorealism
+ label:
+ en_US: photorealism
+ - value: anime
+ label:
+ en_US: anime
+ - value: art
+ label:
+ en_US: art
+ - name: aspect_ratio
+ type: select
+ required: false
+ label:
+ en_US: "aspect ratio"
+ human_description:
+ en_US: The aspect ratio of the generated image. It's just efficient for `Essential V2` mode.
+ form: form
+ options:
+ - value: "1:1"
+ label:
+ en_US: "1:1"
+ - value: "4:5"
+ label:
+ en_US: "4:5"
+ - value: "5:4"
+ label:
+ en_US: "5:4"
+ - value: "2:3"
+ label:
+ en_US: "2:3"
+ - value: "3:2"
+ label:
+ en_US: "3:2"
+ - value: "4:7"
+ label:
+ en_US: "4:7"
+ - value: "7:4"
+ label:
+ en_US: "7:4"
+ - name: output_format
+ type: select
+ required: false
+ label:
+ en_US: "output format"
+ human_description:
+ en_US: The file format of the generated image.
+ form: form
+ options:
+ - value: jpeg
+ label:
+ en_US: jpeg
+ - value: png
+ label:
+ en_US: png
+ - name: response_format
+ type: select
+ required: false
+ label:
+ en_US: "response format"
+ human_description:
+ en_US: The format in which the generated images are returned. Must be one of url or b64. URLs are only valid for 1 hour after the image has been generated.
+ form: form
+ options:
+ - value: url
+ label:
+ en_US: url
+ - value: b64
+ label:
+ en_US: b64
+ - name: model
+ type: string
+ required: false
+ label:
+ en_US: model
+ human_description:
+ en_US: Model ID supported by this pipeline and family. It's just efficient for `Stable Diffusion XL`, `Stable Diffusion`, `Latent Consistency` mode.
+ form: form
+ - name: negative_prompt
+ type: string
+ required: false
+ label:
+ en_US: negative prompt
+ human_description:
+ en_US: Text input that will not guide the image generation. It's just efficient for `Stable Diffusion XL`, `Stable Diffusion`, `Latent Consistency` mode.
+ form: form
+ - name: prompt_2
+ type: string
+ required: false
+ label:
+ en_US: prompt2
+ human_description:
+ en_US: Prompt sent to second tokenizer and text encoder. If not defined, prompt is used in both text-encoders. It's just efficient for `Stable Diffusion XL` mode.
+ form: form
+ - name: width
+ type: number
+ required: false
+ label:
+ en_US: width
+ human_description:
+      en_US: The width of the generated image in pixels. Width needs to be a multiple of 64.
+ form: form
+ - name: height
+ type: number
+ required: false
+ label:
+ en_US: height
+ human_description:
+      en_US: The height of the generated image in pixels. Height needs to be a multiple of 64.
+ form: form
+ - name: steps
+ type: number
+ required: false
+ label:
+ en_US: steps
+ human_description:
+ en_US: The number of denoising steps. More steps usually can produce higher quality images, but take more time to generate. It's just efficient for `Stable Diffusion XL`, `Stable Diffusion`, `Latent Consistency` mode.
+ form: form
From 27e08a8e2efa6c9ba89893126ab4609d83a203ec Mon Sep 17 00:00:00 2001
From: Joe <79627742+ZhouhaoJiang@users.noreply.github.com>
Date: Sat, 20 Jul 2024 00:53:31 +0800
Subject: [PATCH 4/4] Fix/extra table tracing app config (#6487)
---
..._remove_extra_tracing_app_config_table.py | 54 +++++++++++++++++++
1 file changed, 54 insertions(+)
create mode 100644 api/migrations/versions/fecff1c3da27_remove_extra_tracing_app_config_table.py
diff --git a/api/migrations/versions/fecff1c3da27_remove_extra_tracing_app_config_table.py b/api/migrations/versions/fecff1c3da27_remove_extra_tracing_app_config_table.py
new file mode 100644
index 00000000000000..271b2490de1055
--- /dev/null
+++ b/api/migrations/versions/fecff1c3da27_remove_extra_tracing_app_config_table.py
@@ -0,0 +1,54 @@
+"""remove extra tracing app config table and add idx_dataset_permissions_tenant_id
+
+Revision ID: fecff1c3da27
+Revises: 408176b91ad3
+Create Date: 2024-07-19 12:03:21.217463
+
+"""
+import sqlalchemy as sa
+from alembic import op
+from sqlalchemy.dialects import postgresql
+
+# revision identifiers, used by Alembic.
+revision = 'fecff1c3da27'
+down_revision = '408176b91ad3'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_table('tracing_app_configs')
+
+ with op.batch_alter_table('trace_app_config', schema=None) as batch_op:
+ batch_op.drop_index('tracing_app_config_app_id_idx')
+
+ # idx_dataset_permissions_tenant_id
+ with op.batch_alter_table('dataset_permissions', schema=None) as batch_op:
+ batch_op.create_index('idx_dataset_permissions_tenant_id', ['tenant_id'])
+ # ### end Alembic commands ###
+
+
+def downgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.create_table(
+ 'tracing_app_configs',
+ sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False),
+ sa.Column('app_id', postgresql.UUID(), nullable=False),
+ sa.Column('tracing_provider', sa.String(length=255), nullable=True),
+ sa.Column('tracing_config', postgresql.JSON(astext_type=sa.Text()), nullable=True),
+ sa.Column(
+ 'created_at', postgresql.TIMESTAMP(), server_default=sa.text('now()'), autoincrement=False, nullable=False
+ ),
+ sa.Column(
+ 'updated_at', postgresql.TIMESTAMP(), server_default=sa.text('now()'), autoincrement=False, nullable=False
+ ),
+ sa.PrimaryKeyConstraint('id', name='tracing_app_config_pkey')
+ )
+
+ with op.batch_alter_table('trace_app_config', schema=None) as batch_op:
+ batch_op.create_index('tracing_app_config_app_id_idx', ['app_id'])
+
+ with op.batch_alter_table('dataset_permissions', schema=None) as batch_op:
+ batch_op.drop_index('idx_dataset_permissions_tenant_id')
+ # ### end Alembic commands ###