diff --git a/.github/workflows/api-tests.yml b/.github/workflows/api-tests.yml
index 8173bee58e8e24..7c632f8a34d56a 100644
--- a/.github/workflows/api-tests.yml
+++ b/.github/workflows/api-tests.yml
@@ -76,7 +76,7 @@ jobs:
- name: Run Workflow
run: poetry run -C api bash dev/pytest/pytest_workflow.sh
- - name: Set up Vector Stores (Weaviate, Qdrant, PGVector, Milvus, PgVecto-RS, Chroma, MyScale)
+ - name: Set up Vector Stores (Weaviate, Qdrant, PGVector, Milvus, PgVecto-RS, Chroma, MyScale, ElasticSearch)
uses: hoverkraft-tech/compose-action@v2.0.0
with:
compose-file: |
@@ -90,5 +90,6 @@ jobs:
pgvecto-rs
pgvector
chroma
+ elasticsearch
- name: Test Vector Stores
run: poetry run -C api bash dev/pytest/pytest_vdb.sh
diff --git a/.github/workflows/expose_service_ports.sh b/.github/workflows/expose_service_ports.sh
index 3418bf0c6f6688..ae3e0ee69d8cfb 100755
--- a/.github/workflows/expose_service_ports.sh
+++ b/.github/workflows/expose_service_ports.sh
@@ -6,5 +6,6 @@ yq eval '.services.chroma.ports += ["8000:8000"]' -i docker/docker-compose.yaml
yq eval '.services["milvus-standalone"].ports += ["19530:19530"]' -i docker/docker-compose.yaml
yq eval '.services.pgvector.ports += ["5433:5432"]' -i docker/docker-compose.yaml
yq eval '.services["pgvecto-rs"].ports += ["5431:5432"]' -i docker/docker-compose.yaml
+yq eval '.services["elasticsearch"].ports += ["9200:9200"]' -i docker/docker-compose.yaml
-echo "Ports exposed for sandbox, weaviate, qdrant, chroma, milvus, pgvector, pgvecto-rs."
\ No newline at end of file
+echo "Ports exposed for sandbox, weaviate, qdrant, chroma, milvus, pgvector, pgvecto-rs, elasticsearch."
\ No newline at end of file
diff --git a/.github/workflows/style.yml b/.github/workflows/style.yml
index f6092c86337d85..d681dc66276dd1 100644
--- a/.github/workflows/style.yml
+++ b/.github/workflows/style.yml
@@ -45,6 +45,10 @@ jobs:
if: steps.changed-files.outputs.any_changed == 'true'
run: poetry run -C api dotenv-linter ./api/.env.example ./web/.env.example
+ - name: Ruff formatter check
+ if: steps.changed-files.outputs.any_changed == 'true'
+ run: poetry run -C api ruff format --check ./api
+
- name: Lint hints
if: failure()
run: echo "Please run 'dev/reformat' to fix the fixable linting errors."
diff --git a/README.md b/README.md
index f8c09b50766dde..1c49c415fe09a9 100644
--- a/README.md
+++ b/README.md
@@ -4,7 +4,7 @@
Dify Cloud ·
Self-hosting ·
Documentation ·
- Enterprise inquiry
+ Enterprise inquiry
@@ -38,6 +38,7 @@
+
@@ -151,7 +152,7 @@ Quickly get Dify running in your environment with this [starter guide](#quick-st
Use our [documentation](https://docs.dify.ai) for further references and more in-depth instructions.
- **Dify for enterprise / organizations**
-We provide additional enterprise-centric features. [Schedule a meeting with us](https://cal.com/guchenhe/30min) or [send us an email](mailto:business@dify.ai?subject=[GitHub]Business%20License%20Inquiry) to discuss enterprise needs.
+We provide additional enterprise-centric features. [Log your questions for us through this chatbot](https://udify.app/chat/22L1zSxg6yW1cWQg) or [send us an email](mailto:business@dify.ai?subject=[GitHub]Business%20License%20Inquiry) to discuss enterprise needs.
> For startups and small businesses using AWS, check out [Dify Premium on AWS Marketplace](https://aws.amazon.com/marketplace/pp/prodview-t22mebxzwjhu6) and deploy it to your own AWS VPC with one-click. It's an affordable AMI offering with the option to create apps with custom logo and branding.
@@ -220,23 +221,6 @@ At the same time, please consider supporting Dify by sharing it on social media
* [Discord](https://discord.gg/FngNHpbcY7). Best for: sharing your applications and hanging out with the community.
* [Twitter](https://twitter.com/dify_ai). Best for: sharing your applications and hanging out with the community.
-Or, schedule a meeting directly with a team member:
-
-
-
- Point of Contact
- Purpose
-
-
-
- Business enquiries & product feedback
-
-
-
- Contributions, issues & feature requests
-
-
-
## Star history
[![Star History Chart](https://api.star-history.com/svg?repos=langgenius/dify&type=Date)](https://star-history.com/#langgenius/dify&Date)
diff --git a/README_AR.md b/README_AR.md
index cc5d1e7b2900fb..10d572cc49a83b 100644
--- a/README_AR.md
+++ b/README_AR.md
@@ -4,7 +4,7 @@
Dify Cloud ·
الاستضافة الذاتية ·
التوثيق ·
- استفسارات الشركات
+ استفسار الشركات (للإنجليزية فقط)
@@ -38,6 +38,7 @@
+
@@ -203,23 +204,6 @@ docker compose up -d
* [Discord](https://discord.gg/FngNHpbcY7). الأفضل لـ: مشاركة تطبيقاتك والترفيه مع المجتمع.
* [تويتر](https://twitter.com/dify_ai). الأفضل لـ: مشاركة تطبيقاتك والترفيه مع المجتمع.
-أو، قم بجدولة اجتماع مباشرة مع أحد أعضاء الفريق:
-
-
-
- نقطة الاتصال
- الغرض
-
-
-
- استفسارات الأعمال واقتراحات حول المنتج
-
-
-
- المساهمات والمشكلات وطلبات الميزات
-
-
-
## تاريخ النجمة
[![Star History Chart](https://api.star-history.com/svg?repos=langgenius/dify&type=Date)](https://star-history.com/#langgenius/dify&Date)
diff --git a/README_CN.md b/README_CN.md
index 0ff74a5871f466..32551fcc313932 100644
--- a/README_CN.md
+++ b/README_CN.md
@@ -4,7 +4,7 @@
Dify 云服务 ·
自托管 ·
文档 ·
-
预约演示
+
(需用英文)常见问题解答 / 联系团队
@@ -29,14 +29,16 @@
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
@@ -156,7 +158,7 @@ Dify 是一个开源的 LLM 应用开发平台。其直观的界面结合了 AI
使用我们的[文档](https://docs.dify.ai)进行进一步的参考和更深入的说明。
- **面向企业/组织的 Dify**
-我们提供额外的面向企业的功能。[与我们安排会议](https://cal.com/guchenhe/30min)或[给我们发送电子邮件](mailto:business@dify.ai?subject=[GitHub]Business%20License%20Inquiry)讨论企业需求。
+我们提供额外的面向企业的功能。[给我们发送电子邮件](mailto:business@dify.ai?subject=[GitHub]Business%20License%20Inquiry)讨论企业需求。
> 对于使用 AWS 的初创公司和中小型企业,请查看 [AWS Marketplace 上的 Dify 高级版](https://aws.amazon.com/marketplace/pp/prodview-t22mebxzwjhu6),并使用一键部署到您自己的 AWS VPC。它是一个价格实惠的 AMI 产品,提供了使用自定义徽标和品牌创建应用程序的选项。
## 保持领先
diff --git a/README_ES.md b/README_ES.md
index 465e06ed0aa051..2ae044b32883b9 100644
--- a/README_ES.md
+++ b/README_ES.md
@@ -4,7 +4,7 @@
Dify Cloud ·
Auto-alojamiento ·
Documentación ·
- Programar demostración
+ Consultas empresariales (en inglés)
@@ -29,14 +29,16 @@
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
#
@@ -156,7 +158,7 @@ Pon rápidamente Dify en funcionamiento en tu entorno con esta [guía de inicio
Usa nuestra [documentación](https://docs.dify.ai) para más referencias e instrucciones más detalladas.
- **Dify para Empresas / Organizaciones**
-Proporcionamos características adicionales centradas en la empresa. [Programa una reunión con nosotros](https://cal.com/guchenhe/30min) o [envíanos un correo electrónico](mailto:business@dify.ai?subject=[GitHub]Business%20License%20Inquiry) para discutir las necesidades empresariales.
+Proporcionamos características adicionales centradas en la empresa. [Envíanos un correo electrónico](mailto:business@dify.ai?subject=[GitHub]Business%20License%20Inquiry) para discutir las necesidades empresariales.
> Para startups y pequeñas empresas que utilizan AWS, echa un vistazo a [Dify Premium en AWS Marketplace](https://aws.amazon.com/marketplace/pp/prodview-t22mebxzwjhu6) e impleméntalo en tu propio VPC de AWS con un clic. Es una AMI asequible que ofrece la opción de crear aplicaciones con logotipo y marca personalizados.
@@ -228,23 +230,6 @@ Al mismo tiempo, considera apoyar a Dify compartiéndolo en redes sociales y en
* [Discord](https://discord.gg/FngNHpbcY7). Lo mejor para: compartir tus aplicaciones y pasar el rato con la comunidad.
* [Twitter](https://twitter.com/dify_ai). Lo mejor para: compartir tus aplicaciones y pasar el rato con la comunidad.
-O, programa una reunión directamente con un miembro del equipo:
-
-
-
- Punto de Contacto
- Propósito
-
-
-
- Consultas comerciales y retroalimentación del producto
-
-
-
- Contribuciones, problemas y solicitudes de características
-
-
-
## Historial de Estrellas
[![Gráfico de Historial de Estrellas](https://api.star-history.com/svg?repos=langgenius/dify&type=Date)](https://star-history.com/#langgenius/dify&Date)
diff --git a/README_FR.md b/README_FR.md
index 2e06f6bc83260a..681d596749c9e7 100644
--- a/README_FR.md
+++ b/README_FR.md
@@ -4,7 +4,7 @@
Dify Cloud ·
Auto-hébergement ·
Documentation ·
- Planifier une démo
+ Demande d’entreprise (en anglais seulement)
@@ -29,14 +29,16 @@
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
#
@@ -156,7 +158,7 @@ Lancez rapidement Dify dans votre environnement avec ce [guide de démarrage](#q
Utilisez notre [documentation](https://docs.dify.ai) pour plus de références et des instructions plus détaillées.
- **Dify pour les entreprises / organisations**
-Nous proposons des fonctionnalités supplémentaires adaptées aux entreprises. [Planifiez une réunion avec nous](https://cal.com/guchenhe/30min) ou [envoyez-nous un e-mail](mailto:business@dify.ai?subject=[GitHub]Business%20License%20Inquiry) pour discuter des besoins de l'entreprise.
+Nous proposons des fonctionnalités supplémentaires adaptées aux entreprises. [Envoyez-nous un e-mail](mailto:business@dify.ai?subject=[GitHub]Business%20License%20Inquiry) pour discuter des besoins de l'entreprise.
> Pour les startups et les petites entreprises utilisant AWS, consultez [Dify Premium sur AWS Marketplace](https://aws.amazon.com/marketplace/pp/prodview-t22mebxzwjhu6) et déployez-le dans votre propre VPC AWS en un clic. C'est une offre AMI abordable avec la possibilité de créer des applications avec un logo et une marque personnalisés.
@@ -226,23 +228,6 @@ Dans le même temps, veuillez envisager de soutenir Dify en le partageant sur le
* [Discord](https://discord.gg/FngNHpbcY7). Meilleur pour: partager vos applications et passer du temps avec la communauté.
* [Twitter](https://twitter.com/dify_ai). Meilleur pour: partager vos applications et passer du temps avec la communauté.
-Ou, planifiez directement une réunion avec un membre de l'équipe:
-
-
-
- Point de contact
- Objectif
-
-
-
- Demandes commerciales & retours produit
-
-
-
- Contributions, problèmes & demandes de fonctionnalités
-
-
-
## Historique des étoiles
[![Graphique de l'historique des étoiles](https://api.star-history.com/svg?repos=langgenius/dify&type=Date)](https://star-history.com/#langgenius/dify&Date)
diff --git a/README_JA.md b/README_JA.md
index 15759d2c69c86f..e6a8621e7baae5 100644
--- a/README_JA.md
+++ b/README_JA.md
@@ -4,7 +4,7 @@
Dify Cloud ·
セルフホスティング ·
ドキュメント ·
- デモの予約
+ 企業のお問い合わせ(英語のみ)
@@ -29,14 +29,16 @@
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
#
@@ -155,7 +157,7 @@ DifyはオープンソースのLLMアプリケーション開発プラットフ
詳しくは[ドキュメント](https://docs.dify.ai)をご覧ください。
- **企業/組織向けのDify**
-企業中心の機能を提供しています。[こちらからミーティングを予約](https://cal.com/guchenhe/30min)したり、[メールを送信](mailto:business@dify.ai?subject=[GitHub]Business%20License%20Inquiry)して企業のニーズについて相談してください。
+企業中心の機能を提供しています。[メールを送信](mailto:business@dify.ai?subject=[GitHub]Business%20License%20Inquiry)して企業のニーズについて相談してください。
> AWSを使用しているスタートアップ企業や中小企業の場合は、[AWS Marketplace](https://aws.amazon.com/marketplace/pp/prodview-t22mebxzwjhu6)のDify Premiumをチェックして、ワンクリックで自分のAWS VPCにデプロイできます。さらに、手頃な価格のAMIオファリングどして、ロゴやブランディングをカスタマイズしてアプリケーションを作成するオプションがあります。
@@ -225,28 +227,6 @@ docker compose up -d
* [Discord](https://discord.gg/FngNHpbcY7). 主に: アプリケーションの共有やコミュニティとの交流。
* [Twitter](https://twitter.com/dify_ai). 主に: アプリケーションの共有やコミュニティとの交流。
-または、直接チームメンバーとミーティングをスケジュール:
-
-
-
- 連絡先
- 目的
-
-
- ミーティング
- 無料の30分間のミーティングをスケジュール
-
-
- 技術サポート
- 技術的な問題やサポートに関する質問
-
-
- 営業担当
- 法人ライセンスに関するお問い合わせ
-
-
## ライセンス
diff --git a/README_KL.md b/README_KL.md
index bc483bcea5a1b5..04620d42bbec8a 100644
--- a/README_KL.md
+++ b/README_KL.md
@@ -4,7 +4,7 @@
Dify Cloud ·
Self-hosting ·
Documentation ·
- Schedule demo
+ Commercial enquiries
@@ -29,14 +29,16 @@
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
#
@@ -156,7 +158,7 @@ Quickly get Dify running in your environment with this [starter guide](#quick-st
Use our [documentation](https://docs.dify.ai) for further references and more in-depth instructions.
- **Dify for Enterprise / Organizations**
-We provide additional enterprise-centric features. [Schedule a meeting with us](https://cal.com/guchenhe/30min) or [send us an email](mailto:business@dify.ai?subject=[GitHub]Business%20License%20Inquiry) to discuss enterprise needs.
+We provide additional enterprise-centric features. [Send us an email](mailto:business@dify.ai?subject=[GitHub]Business%20License%20Inquiry) to discuss enterprise needs.
> For startups and small businesses using AWS, check out [Dify Premium on AWS Marketplace](https://aws.amazon.com/marketplace/pp/prodview-t22mebxzwjhu6) and deploy it to your own AWS VPC with one-click. It's an affordable AMI offering with the option to create apps with custom logo and branding.
@@ -228,23 +230,6 @@ At the same time, please consider supporting Dify by sharing it on social media
* [Discord](https://discord.gg/FngNHpbcY7). Best for: sharing your applications and hanging out with the community.
* [Twitter](https://twitter.com/dify_ai). Best for: sharing your applications and hanging out with the community.
-Or, schedule a meeting directly with a team member:
-
-
-
- Point of Contact
- Purpose
-
-
-
- Business enquiries & product feedback
-
-
-
- Contributions, issues & feature requests
-
-
-
## Star History
[![Star History Chart](https://api.star-history.com/svg?repos=langgenius/dify&type=Date)](https://star-history.com/#langgenius/dify&Date)
diff --git a/README_KR.md b/README_KR.md
index dfa103b8f589d2..a5f3bc68d04d74 100644
--- a/README_KR.md
+++ b/README_KR.md
@@ -4,7 +4,7 @@
Dify 클라우드 ·
셀프-호스팅 ·
문서 ·
- 기업 문의
+ 기업 문의 (영어만 가능)
@@ -35,8 +35,10 @@
-
+
+
+
@@ -149,7 +151,7 @@
추가 참조 및 더 심층적인 지침은 [문서](https://docs.dify.ai)를 사용하세요.
- **기업 / 조직을 위한 Dify**
- 우리는 추가적인 기업 중심 기능을 제공합니다. 당사와 [미팅일정](https://cal.com/guchenhe/30min)을 잡거나 [이메일 보내기](mailto:business@dify.ai?subject=[GitHub]Business%20License%20Inquiry)를 통해 기업 요구 사항을 논의하십시오.
+ 우리는 추가적인 기업 중심 기능을 제공합니다. [이메일 보내기](mailto:business@dify.ai?subject=[GitHub]Business%20License%20Inquiry)를 통해 기업 요구 사항을 논의하십시오.
> AWS를 사용하는 스타트업 및 중소기업의 경우 [AWS Marketplace에서 Dify Premium](https://aws.amazon.com/marketplace/pp/prodview-t22mebxzwjhu6)을 확인하고 한 번의 클릭으로 자체 AWS VPC에 배포하십시오. 맞춤형 로고와 브랜딩이 포함된 앱을 생성할 수 있는 옵션이 포함된 저렴한 AMI 제품입니다.
@@ -218,22 +220,6 @@ Dify를 Kubernetes에 배포하고 프리미엄 스케일링 설정을 구성했
* [디스코드](https://discord.gg/FngNHpbcY7). 애플리케이션 공유 및 커뮤니티와 소통하기에 적합합니다.
* [트위터](https://twitter.com/dify_ai). 애플리케이션 공유 및 커뮤니티와 소통하기에 적합합니다.
-또는 팀원과 직접 미팅을 예약하세요:
-
-
-
- 연락처
- 목적
-
-
-
- 비즈니스 문의 및 제품 피드백
-
-
-
- 기여, 이슈 및 기능 요청
-
-
## Star 히스토리
diff --git a/README_TR.md b/README_TR.md
index 2ae7d440a81373..54b6db3f823717 100644
--- a/README_TR.md
+++ b/README_TR.md
@@ -4,7 +4,7 @@
Dify Bulut ·
Kendi Sunucunuzda Barındırma ·
Dokümantasyon ·
- Kurumsal Sorgu
+ Yalnızca İngilizce: Kurumsal Sorgulama
@@ -38,6 +38,7 @@
+
@@ -155,7 +156,7 @@ Bu [başlangıç kılavuzu](#quick-start) ile Dify'ı kendi ortamınızda hızl
Daha fazla referans ve detaylı talimatlar için [dokümantasyonumuzu](https://docs.dify.ai) kullanın.
- **Kurumlar / organizasyonlar için Dify**
-Ek kurumsal odaklı özellikler sunuyoruz. Kurumsal ihtiyaçları görüşmek için [bizimle bir toplantı planlayın](https://cal.com/guchenhe/30min) veya [bize bir e-posta gönderin](mailto:business@dify.ai?subject=[GitHub]Business%20License%20Inquiry).
+Ek kurumsal odaklı özellikler sunuyoruz. Kurumsal ihtiyaçları görüşmek için [bize bir e-posta gönderin](mailto:business@dify.ai?subject=[GitHub]Business%20License%20Inquiry).
> AWS kullanan startuplar ve küçük işletmeler için, [AWS Marketplace'deki Dify Premium'a](https://aws.amazon.com/marketplace/pp/prodview-t22mebxzwjhu6) göz atın ve tek tıklamayla kendi AWS VPC'nize dağıtın. Bu, özel logo ve marka ile uygulamalar oluşturma seçeneğine sahip uygun fiyatlı bir AMI teklifdir.
## Güncel Kalma
@@ -223,23 +224,6 @@ Aynı zamanda, lütfen Dify'ı sosyal medyada, etkinliklerde ve konferanslarda p
* [Discord](https://discord.gg/FngNHpbcY7). En uygun: uygulamalarınızı paylaşmak ve toplulukla vakit geçirmek için.
* [Twitter](https://twitter.com/dify_ai). En uygun: uygulamalarınızı paylaşmak ve toplulukla vakit geçirmek için.
-Veya doğrudan bir ekip üyesiyle toplantı planlayın:
-
-
-
- İletişim Noktası
- Amaç
-
-
-
- İş sorgulamaları & ürün geri bildirimleri
-
-
-
- Katkılar, sorunlar & özellik istekleri
-
-
-
## Star history
[![Star History Chart](https://api.star-history.com/svg?repos=langgenius/dify&type=Date)](https://star-history.com/#langgenius/dify&Date)
diff --git a/README_VI.md b/README_VI.md
new file mode 100644
index 00000000000000..6d4035eceb06de
--- /dev/null
+++ b/README_VI.md
@@ -0,0 +1,234 @@
+![cover-v5-optimized](https://github.com/langgenius/dify/assets/13230914/f9e19af5-61ba-4119-b926-d10c4c06ebab)
+
+
+ Dify Cloud ·
+ Tự triển khai ·
+ Tài liệu ·
+ Yêu cầu doanh nghiệp
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Dify là một nền tảng phát triển ứng dụng LLM mã nguồn mở. Giao diện trực quan kết hợp quy trình làm việc AI, mô hình RAG, khả năng tác nhân, quản lý mô hình, tính năng quan sát và hơn thế nữa, cho phép bạn nhanh chóng chuyển từ nguyên mẫu sang sản phẩm. Đây là danh sách các tính năng cốt lõi:
+
+
+**1. Quy trình làm việc**:
+ Xây dựng và kiểm tra các quy trình làm việc AI mạnh mẽ trên một canvas trực quan, tận dụng tất cả các tính năng sau đây và hơn thế nữa.
+
+
+ https://github.com/langgenius/dify/assets/13230914/356df23e-1604-483d-80a6-9517ece318aa
+
+
+
+**2. Hỗ trợ mô hình toàn diện**:
+ Tích hợp liền mạch với hàng trăm mô hình LLM độc quyền / mã nguồn mở từ hàng chục nhà cung cấp suy luận và giải pháp tự lưu trữ, bao gồm GPT, Mistral, Llama3, và bất kỳ mô hình tương thích API OpenAI nào. Danh sách đầy đủ các nhà cung cấp mô hình được hỗ trợ có thể được tìm thấy [tại đây](https://docs.dify.ai/getting-started/readme/model-providers).
+
+![providers-v5](https://github.com/langgenius/dify/assets/13230914/5a17bdbe-097a-4100-8363-40255b70f6e3)
+
+
+**3. IDE Prompt**:
+ Giao diện trực quan để tạo prompt, so sánh hiệu suất mô hình và thêm các tính năng bổ sung như chuyển văn bản thành giọng nói cho một ứng dụng dựa trên trò chuyện.
+
+**4. Mô hình RAG**:
+ Khả năng RAG mở rộng bao gồm mọi thứ từ nhập tài liệu đến truy xuất, với hỗ trợ sẵn có cho việc trích xuất văn bản từ PDF, PPT và các định dạng tài liệu phổ biến khác.
+
+**5. Khả năng tác nhân**:
+ Bạn có thể định nghĩa các tác nhân dựa trên LLM Function Calling hoặc ReAct, và thêm các công cụ được xây dựng sẵn hoặc tùy chỉnh cho tác nhân. Dify cung cấp hơn 50 công cụ tích hợp sẵn cho các tác nhân AI, như Google Search, DALL·E, Stable Diffusion và WolframAlpha.
+
+**6. LLMOps**:
+ Giám sát và phân tích nhật ký và hiệu suất ứng dụng theo thời gian. Bạn có thể liên tục cải thiện prompt, bộ dữ liệu và mô hình dựa trên dữ liệu sản xuất và chú thích.
+
+**7. Backend-as-a-Service**:
+ Tất cả các dịch vụ của Dify đều đi kèm với các API tương ứng, vì vậy bạn có thể dễ dàng tích hợp Dify vào logic kinh doanh của riêng mình.
+
+
+## So sánh tính năng
+
+
+ Tính năng
+ Dify.AI
+ LangChain
+ Flowise
+ OpenAI Assistants API
+
+
+ Phương pháp lập trình
+ Hướng API + Ứng dụng
+ Mã Python
+ Hướng ứng dụng
+ Hướng API
+
+
+ LLMs được hỗ trợ
+ Đa dạng phong phú
+ Đa dạng phong phú
+ Đa dạng phong phú
+ Chỉ OpenAI
+
+
+ RAG Engine
+ ✅
+ ✅
+ ✅
+ ✅
+
+
+ Agent
+ ✅
+ ✅
+ ❌
+ ✅
+
+
+ Quy trình làm việc
+ ✅
+ ❌
+ ✅
+ ❌
+
+
+ Khả năng quan sát
+ ✅
+ ✅
+ ❌
+ ❌
+
+
+ Tính năng doanh nghiệp (SSO/Kiểm soát truy cập)
+ ✅
+ ❌
+ ❌
+ ❌
+
+
+ Triển khai cục bộ
+ ✅
+ ✅
+ ✅
+ ❌
+
+
+
+## Sử dụng Dify
+
+- **Cloud**
+Chúng tôi lưu trữ dịch vụ [Dify Cloud](https://dify.ai) cho bất kỳ ai muốn thử mà không cần cài đặt. Nó cung cấp tất cả các khả năng của phiên bản tự triển khai và bao gồm 200 lượt gọi GPT-4 miễn phí trong gói sandbox.
+
+- **Tự triển khai Dify Community Edition**
+Nhanh chóng chạy Dify trong môi trường của bạn với [hướng dẫn bắt đầu](#quick-start) này.
+Sử dụng [tài liệu](https://docs.dify.ai) của chúng tôi để tham khảo thêm và nhận hướng dẫn chi tiết hơn.
+
+- **Dify cho doanh nghiệp / tổ chức**
+Chúng tôi cung cấp các tính năng bổ sung tập trung vào doanh nghiệp. [Ghi lại câu hỏi của bạn cho chúng tôi thông qua chatbot này](https://udify.app/chat/22L1zSxg6yW1cWQg) hoặc [gửi email cho chúng tôi](mailto:business@dify.ai?subject=[GitHub]Business%20License%20Inquiry) để thảo luận về nhu cầu doanh nghiệp.
+ > Đối với các công ty khởi nghiệp và doanh nghiệp nhỏ sử dụng AWS, hãy xem [Dify Premium trên AWS Marketplace](https://aws.amazon.com/marketplace/pp/prodview-t22mebxzwjhu6) và triển khai nó vào AWS VPC của riêng bạn chỉ với một cú nhấp chuột. Đây là một AMI giá cả phải chăng với tùy chọn tạo ứng dụng với logo và thương hiệu tùy chỉnh.
+
+
+## Luôn cập nhật
+
+Yêu thích Dify trên GitHub và được thông báo ngay lập tức về các bản phát hành mới.
+
+![star-us](https://github.com/langgenius/dify/assets/13230914/b823edc1-6388-4e25-ad45-2f6b187adbb4)
+
+
+
+## Bắt đầu nhanh
+> Trước khi cài đặt Dify, hãy đảm bảo máy của bạn đáp ứng các yêu cầu hệ thống tối thiểu sau:
+>
+>- CPU >= 2 Core
+>- RAM >= 4GB
+
+
+
+Cách dễ nhất để khởi động máy chủ Dify là chạy tệp [docker-compose.yml](docker/docker-compose.yaml) của chúng tôi. Trước khi chạy lệnh cài đặt, hãy đảm bảo rằng [Docker](https://docs.docker.com/get-docker/) và [Docker Compose](https://docs.docker.com/compose/install/) đã được cài đặt trên máy của bạn:
+
+```bash
+cd docker
+cp .env.example .env
+docker compose up -d
+```
+
+Sau khi chạy, bạn có thể truy cập bảng điều khiển Dify trong trình duyệt của bạn tại [http://localhost/install](http://localhost/install) và bắt đầu quá trình khởi tạo.
+
+> Nếu bạn muốn đóng góp cho Dify hoặc phát triển thêm, hãy tham khảo [hướng dẫn triển khai từ mã nguồn](https://docs.dify.ai/getting-started/install-self-hosted/local-source-code) của chúng tôi
+
+## Các bước tiếp theo
+
+Nếu bạn cần tùy chỉnh cấu hình, vui lòng tham khảo các nhận xét trong tệp [.env.example](docker/.env.example) của chúng tôi và cập nhật các giá trị tương ứng trong tệp `.env` của bạn. Ngoài ra, bạn có thể cần điều chỉnh tệp `docker-compose.yaml`, chẳng hạn như thay đổi phiên bản hình ảnh, ánh xạ cổng hoặc gắn kết khối lượng, dựa trên môi trường triển khai cụ thể và yêu cầu của bạn. Sau khi thực hiện bất kỳ thay đổi nào, vui lòng chạy lại `docker-compose up -d`. Bạn có thể tìm thấy danh sách đầy đủ các biến môi trường có sẵn [tại đây](https://docs.dify.ai/getting-started/install-self-hosted/environments).
+
+Nếu bạn muốn cấu hình một cài đặt có độ sẵn sàng cao, có các [Helm Charts](https://helm.sh/) và tệp YAML do cộng đồng đóng góp cho phép Dify được triển khai trên Kubernetes.
+
+- [Helm Chart bởi @LeoQuote](https://github.com/douban/charts/tree/master/charts/dify)
+- [Helm Chart bởi @BorisPolonsky](https://github.com/BorisPolonsky/dify-helm)
+- [Tệp YAML bởi @Winson-030](https://github.com/Winson-030/dify-kubernetes)
+
+#### Sử dụng Terraform để Triển khai
+
+##### Azure Global
+Triển khai Dify lên Azure chỉ với một cú nhấp chuột bằng cách sử dụng [terraform](https://www.terraform.io/).
+- [Azure Terraform bởi @nikawang](https://github.com/nikawang/dify-azure-terraform)
+
+## Đóng góp
+
+Đối với những người muốn đóng góp mã, xem [Hướng dẫn Đóng góp](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md) của chúng tôi.
+Đồng thời, vui lòng xem xét hỗ trợ Dify bằng cách chia sẻ nó trên mạng xã hội và tại các sự kiện và hội nghị.
+
+
+> Chúng tôi đang tìm kiếm người đóng góp để giúp dịch Dify sang các ngôn ngữ khác ngoài tiếng Trung hoặc tiếng Anh. Nếu bạn quan tâm đến việc giúp đỡ, vui lòng xem [README i18n](https://github.com/langgenius/dify/blob/main/web/i18n/README.md) để biết thêm thông tin và để lại bình luận cho chúng tôi trong kênh `global-users` của [Máy chủ Cộng đồng Discord](https://discord.gg/8Tpq4AcN9c) của chúng tôi.
+
+**Người đóng góp**
+
+
+
+
+
+## Cộng đồng & liên hệ
+
+* [Thảo luận GitHub](https://github.com/langgenius/dify/discussions). Tốt nhất cho: chia sẻ phản hồi và đặt câu hỏi.
+* [Vấn đề GitHub](https://github.com/langgenius/dify/issues). Tốt nhất cho: lỗi bạn gặp phải khi sử dụng Dify.AI và đề xuất tính năng. Xem [Hướng dẫn Đóng góp](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md) của chúng tôi.
+* [Discord](https://discord.gg/FngNHpbcY7). Tốt nhất cho: chia sẻ ứng dụng của bạn và giao lưu với cộng đồng.
+* [Twitter](https://twitter.com/dify_ai). Tốt nhất cho: chia sẻ ứng dụng của bạn và giao lưu với cộng đồng.
+
+## Lịch sử Yêu thích
+
+[![Biểu đồ Lịch sử Yêu thích](https://api.star-history.com/svg?repos=langgenius/dify&type=Date)](https://star-history.com/#langgenius/dify&Date)
+
+## Tiết lộ bảo mật
+
+Để bảo vệ quyền riêng tư của bạn, vui lòng tránh đăng các vấn đề bảo mật trên GitHub. Thay vào đó, hãy gửi câu hỏi của bạn đến security@dify.ai và chúng tôi sẽ cung cấp cho bạn câu trả lời chi tiết hơn.
+
+## Giấy phép
+
+Kho lưu trữ này có sẵn theo [Giấy phép Mã nguồn Mở Dify](LICENSE), về cơ bản là Apache 2.0 với một vài hạn chế bổ sung.
\ No newline at end of file
diff --git a/api/.env.example b/api/.env.example
index cf3a0f302d60cc..f81675fd53810c 100644
--- a/api/.env.example
+++ b/api/.env.example
@@ -130,6 +130,12 @@ TENCENT_VECTOR_DB_DATABASE=dify
TENCENT_VECTOR_DB_SHARD=1
TENCENT_VECTOR_DB_REPLICAS=2
+# Elasticsearch configuration
+ELASTICSEARCH_HOST=127.0.0.1
+ELASTICSEARCH_PORT=9200
+ELASTICSEARCH_USERNAME=elastic
+ELASTICSEARCH_PASSWORD=elastic
+
# PGVECTO_RS configuration
PGVECTO_RS_HOST=localhost
PGVECTO_RS_PORT=5431
@@ -261,4 +267,13 @@ APP_MAX_ACTIVE_REQUESTS=0
# Celery beat configuration
-CELERY_BEAT_SCHEDULER_TIME=1
\ No newline at end of file
+CELERY_BEAT_SCHEDULER_TIME=1
+
+# Position configuration
+POSITION_TOOL_PINS=
+POSITION_TOOL_INCLUDES=
+POSITION_TOOL_EXCLUDES=
+
+POSITION_PROVIDER_PINS=
+POSITION_PROVIDER_INCLUDES=
+POSITION_PROVIDER_EXCLUDES=
diff --git a/api/Dockerfile b/api/Dockerfile
index ac8381faf56b72..06a6f43631e3ab 100644
--- a/api/Dockerfile
+++ b/api/Dockerfile
@@ -12,6 +12,7 @@ ENV POETRY_CACHE_DIR=/tmp/poetry_cache
ENV POETRY_NO_INTERACTION=1
ENV POETRY_VIRTUALENVS_IN_PROJECT=true
ENV POETRY_VIRTUALENVS_CREATE=true
+ENV POETRY_REQUESTS_TIMEOUT=15
FROM base AS packages
@@ -54,6 +55,9 @@ ENV VIRTUAL_ENV=/app/api/.venv
COPY --from=packages ${VIRTUAL_ENV} ${VIRTUAL_ENV}
ENV PATH="${VIRTUAL_ENV}/bin:${PATH}"
+# Download nltk data
+RUN python -c "import nltk; nltk.download('punkt')"
+
# Copy source code
COPY . /app/api/
diff --git a/api/app.py b/api/app.py
index 50441cb81da1f4..ad219ca0d67459 100644
--- a/api/app.py
+++ b/api/app.py
@@ -1,6 +1,6 @@
import os
-if os.environ.get("DEBUG", "false").lower() != 'true':
+if os.environ.get("DEBUG", "false").lower() != "true":
from gevent import monkey
monkey.patch_all()
@@ -57,7 +57,7 @@
if os.name == "nt":
os.system('tzutil /s "UTC"')
else:
- os.environ['TZ'] = 'UTC'
+ os.environ["TZ"] = "UTC"
time.tzset()
@@ -70,13 +70,14 @@ class DifyApp(Flask):
# -------------
-config_type = os.getenv('EDITION', default='SELF_HOSTED') # ce edition first
+config_type = os.getenv("EDITION", default="SELF_HOSTED") # ce edition first
# ----------------------------
# Application Factory Function
# ----------------------------
+
def create_flask_app_with_configs() -> Flask:
"""
create a raw flask app
@@ -92,7 +93,7 @@ def create_flask_app_with_configs() -> Flask:
elif isinstance(value, int | float | bool):
os.environ[key] = str(value)
elif value is None:
- os.environ[key] = ''
+ os.environ[key] = ""
return dify_app
@@ -100,10 +101,10 @@ def create_flask_app_with_configs() -> Flask:
def create_app() -> Flask:
app = create_flask_app_with_configs()
- app.secret_key = app.config['SECRET_KEY']
+ app.secret_key = app.config["SECRET_KEY"]
log_handlers = None
- log_file = app.config.get('LOG_FILE')
+ log_file = app.config.get("LOG_FILE")
if log_file:
log_dir = os.path.dirname(log_file)
os.makedirs(log_dir, exist_ok=True)
@@ -111,23 +112,24 @@ def create_app() -> Flask:
RotatingFileHandler(
filename=log_file,
maxBytes=1024 * 1024 * 1024,
- backupCount=5
+ backupCount=5,
),
- logging.StreamHandler(sys.stdout)
+ logging.StreamHandler(sys.stdout),
]
logging.basicConfig(
- level=app.config.get('LOG_LEVEL'),
- format=app.config.get('LOG_FORMAT'),
- datefmt=app.config.get('LOG_DATEFORMAT'),
+ level=app.config.get("LOG_LEVEL"),
+ format=app.config.get("LOG_FORMAT"),
+ datefmt=app.config.get("LOG_DATEFORMAT"),
handlers=log_handlers,
- force=True
+ force=True,
)
- log_tz = app.config.get('LOG_TZ')
+ log_tz = app.config.get("LOG_TZ")
if log_tz:
from datetime import datetime
import pytz
+
timezone = pytz.timezone(log_tz)
def time_converter(seconds):
@@ -162,24 +164,24 @@ def initialize_extensions(app):
@login_manager.request_loader
def load_user_from_request(request_from_flask_login):
"""Load user based on the request."""
- if request.blueprint not in ['console', 'inner_api']:
+ if request.blueprint not in ["console", "inner_api"]:
return None
# Check if the user_id contains a dot, indicating the old format
- auth_header = request.headers.get('Authorization', '')
+ auth_header = request.headers.get("Authorization", "")
if not auth_header:
- auth_token = request.args.get('_token')
+ auth_token = request.args.get("_token")
if not auth_token:
- raise Unauthorized('Invalid Authorization token.')
+ raise Unauthorized("Invalid Authorization token.")
else:
- if ' ' not in auth_header:
- raise Unauthorized('Invalid Authorization header format. Expected \'Bearer \' format.')
+ if " " not in auth_header:
+ raise Unauthorized("Invalid Authorization header format. Expected 'Bearer ' format.")
auth_scheme, auth_token = auth_header.split(None, 1)
auth_scheme = auth_scheme.lower()
- if auth_scheme != 'bearer':
- raise Unauthorized('Invalid Authorization header format. Expected \'Bearer \' format.')
+ if auth_scheme != "bearer":
+ raise Unauthorized("Invalid Authorization header format. Expected 'Bearer ' format.")
decoded = PassportService().verify(auth_token)
- user_id = decoded.get('user_id')
+ user_id = decoded.get("user_id")
account = AccountService.load_logged_in_account(account_id=user_id, token=auth_token)
if account:
@@ -190,10 +192,11 @@ def load_user_from_request(request_from_flask_login):
@login_manager.unauthorized_handler
def unauthorized_handler():
"""Handle unauthorized requests."""
- return Response(json.dumps({
- 'code': 'unauthorized',
- 'message': "Unauthorized."
- }), status=401, content_type="application/json")
+ return Response(
+ json.dumps({"code": "unauthorized", "message": "Unauthorized."}),
+ status=401,
+ content_type="application/json",
+ )
# register blueprint routers
@@ -204,38 +207,36 @@ def register_blueprints(app):
from controllers.service_api import bp as service_api_bp
from controllers.web import bp as web_bp
- CORS(service_api_bp,
- allow_headers=['Content-Type', 'Authorization', 'X-App-Code'],
- methods=['GET', 'PUT', 'POST', 'DELETE', 'OPTIONS', 'PATCH']
- )
+ CORS(
+ service_api_bp,
+ allow_headers=["Content-Type", "Authorization", "X-App-Code"],
+ methods=["GET", "PUT", "POST", "DELETE", "OPTIONS", "PATCH"],
+ )
app.register_blueprint(service_api_bp)
- CORS(web_bp,
- resources={
- r"/*": {"origins": app.config['WEB_API_CORS_ALLOW_ORIGINS']}},
- supports_credentials=True,
- allow_headers=['Content-Type', 'Authorization', 'X-App-Code'],
- methods=['GET', 'PUT', 'POST', 'DELETE', 'OPTIONS', 'PATCH'],
- expose_headers=['X-Version', 'X-Env']
- )
+ CORS(
+ web_bp,
+ resources={r"/*": {"origins": app.config["WEB_API_CORS_ALLOW_ORIGINS"]}},
+ supports_credentials=True,
+ allow_headers=["Content-Type", "Authorization", "X-App-Code"],
+ methods=["GET", "PUT", "POST", "DELETE", "OPTIONS", "PATCH"],
+ expose_headers=["X-Version", "X-Env"],
+ )
app.register_blueprint(web_bp)
- CORS(console_app_bp,
- resources={
- r"/*": {"origins": app.config['CONSOLE_CORS_ALLOW_ORIGINS']}},
- supports_credentials=True,
- allow_headers=['Content-Type', 'Authorization'],
- methods=['GET', 'PUT', 'POST', 'DELETE', 'OPTIONS', 'PATCH'],
- expose_headers=['X-Version', 'X-Env']
- )
+ CORS(
+ console_app_bp,
+ resources={r"/*": {"origins": app.config["CONSOLE_CORS_ALLOW_ORIGINS"]}},
+ supports_credentials=True,
+ allow_headers=["Content-Type", "Authorization"],
+ methods=["GET", "PUT", "POST", "DELETE", "OPTIONS", "PATCH"],
+ expose_headers=["X-Version", "X-Env"],
+ )
app.register_blueprint(console_app_bp)
- CORS(files_bp,
- allow_headers=['Content-Type'],
- methods=['GET', 'PUT', 'POST', 'DELETE', 'OPTIONS', 'PATCH']
- )
+ CORS(files_bp, allow_headers=["Content-Type"], methods=["GET", "PUT", "POST", "DELETE", "OPTIONS", "PATCH"])
app.register_blueprint(files_bp)
app.register_blueprint(inner_api_bp)
@@ -245,29 +246,29 @@ def register_blueprints(app):
app = create_app()
celery = app.extensions["celery"]
-if app.config.get('TESTING'):
+if app.config.get("TESTING"):
print("App is running in TESTING mode")
@app.after_request
def after_request(response):
"""Add Version headers to the response."""
- response.set_cookie('remember_token', '', expires=0)
- response.headers.add('X-Version', app.config['CURRENT_VERSION'])
- response.headers.add('X-Env', app.config['DEPLOY_ENV'])
+ response.set_cookie("remember_token", "", expires=0)
+ response.headers.add("X-Version", app.config["CURRENT_VERSION"])
+ response.headers.add("X-Env", app.config["DEPLOY_ENV"])
return response
-@app.route('/health')
+@app.route("/health")
def health():
- return Response(json.dumps({
- 'pid': os.getpid(),
- 'status': 'ok',
- 'version': app.config['CURRENT_VERSION']
- }), status=200, content_type="application/json")
+ return Response(
+ json.dumps({"pid": os.getpid(), "status": "ok", "version": app.config["CURRENT_VERSION"]}),
+ status=200,
+ content_type="application/json",
+ )
-@app.route('/threads')
+@app.route("/threads")
def threads():
num_threads = threading.active_count()
threads = threading.enumerate()
@@ -278,32 +279,34 @@ def threads():
thread_id = thread.ident
is_alive = thread.is_alive()
- thread_list.append({
- 'name': thread_name,
- 'id': thread_id,
- 'is_alive': is_alive
- })
+ thread_list.append(
+ {
+ "name": thread_name,
+ "id": thread_id,
+ "is_alive": is_alive,
+ }
+ )
return {
- 'pid': os.getpid(),
- 'thread_num': num_threads,
- 'threads': thread_list
+ "pid": os.getpid(),
+ "thread_num": num_threads,
+ "threads": thread_list,
}
-@app.route('/db-pool-stat')
+@app.route("/db-pool-stat")
def pool_stat():
engine = db.engine
return {
- 'pid': os.getpid(),
- 'pool_size': engine.pool.size(),
- 'checked_in_connections': engine.pool.checkedin(),
- 'checked_out_connections': engine.pool.checkedout(),
- 'overflow_connections': engine.pool.overflow(),
- 'connection_timeout': engine.pool.timeout(),
- 'recycle_time': db.engine.pool._recycle
+ "pid": os.getpid(),
+ "pool_size": engine.pool.size(),
+ "checked_in_connections": engine.pool.checkedin(),
+ "checked_out_connections": engine.pool.checkedout(),
+ "overflow_connections": engine.pool.overflow(),
+ "connection_timeout": engine.pool.timeout(),
+ "recycle_time": db.engine.pool._recycle,
}
-if __name__ == '__main__':
- app.run(host='0.0.0.0', port=5001)
+if __name__ == "__main__":
+ app.run(host="0.0.0.0", port=5001)
diff --git a/api/commands.py b/api/commands.py
index c7ffb47b512246..41f1a6444c4581 100644
--- a/api/commands.py
+++ b/api/commands.py
@@ -27,32 +27,29 @@
from services.account_service import RegisterService, TenantService
-@click.command('reset-password', help='Reset the account password.')
-@click.option('--email', prompt=True, help='The email address of the account whose password you need to reset')
-@click.option('--new-password', prompt=True, help='the new password.')
-@click.option('--password-confirm', prompt=True, help='the new password confirm.')
+@click.command("reset-password", help="Reset the account password.")
+@click.option("--email", prompt=True, help="The email address of the account whose password you need to reset")
+@click.option("--new-password", prompt=True, help="the new password.")
+@click.option("--password-confirm", prompt=True, help="the new password confirm.")
def reset_password(email, new_password, password_confirm):
"""
Reset password of owner account
Only available in SELF_HOSTED mode
"""
if str(new_password).strip() != str(password_confirm).strip():
- click.echo(click.style('sorry. The two passwords do not match.', fg='red'))
+ click.echo(click.style("sorry. The two passwords do not match.", fg="red"))
return
- account = db.session.query(Account). \
- filter(Account.email == email). \
- one_or_none()
+ account = db.session.query(Account).filter(Account.email == email).one_or_none()
if not account:
- click.echo(click.style('sorry. the account: [{}] not exist .'.format(email), fg='red'))
+ click.echo(click.style("sorry. the account: [{}] not exist .".format(email), fg="red"))
return
try:
valid_password(new_password)
except:
- click.echo(
- click.style('sorry. The passwords must match {} '.format(password_pattern), fg='red'))
+ click.echo(click.style("sorry. The passwords must match {} ".format(password_pattern), fg="red"))
return
# generate password salt
@@ -65,80 +62,87 @@ def reset_password(email, new_password, password_confirm):
account.password = base64_password_hashed
account.password_salt = base64_salt
db.session.commit()
- click.echo(click.style('Congratulations! Password has been reset.', fg='green'))
+ click.echo(click.style("Congratulations! Password has been reset.", fg="green"))
-@click.command('reset-email', help='Reset the account email.')
-@click.option('--email', prompt=True, help='The old email address of the account whose email you need to reset')
-@click.option('--new-email', prompt=True, help='the new email.')
-@click.option('--email-confirm', prompt=True, help='the new email confirm.')
+@click.command("reset-email", help="Reset the account email.")
+@click.option("--email", prompt=True, help="The old email address of the account whose email you need to reset")
+@click.option("--new-email", prompt=True, help="the new email.")
+@click.option("--email-confirm", prompt=True, help="the new email confirm.")
def reset_email(email, new_email, email_confirm):
"""
Replace account email
:return:
"""
if str(new_email).strip() != str(email_confirm).strip():
- click.echo(click.style('Sorry, new email and confirm email do not match.', fg='red'))
+ click.echo(click.style("Sorry, new email and confirm email do not match.", fg="red"))
return
- account = db.session.query(Account). \
- filter(Account.email == email). \
- one_or_none()
+ account = db.session.query(Account).filter(Account.email == email).one_or_none()
if not account:
- click.echo(click.style('sorry. the account: [{}] not exist .'.format(email), fg='red'))
+ click.echo(click.style("sorry. the account: [{}] not exist .".format(email), fg="red"))
return
try:
email_validate(new_email)
except:
- click.echo(
- click.style('sorry. {} is not a valid email. '.format(email), fg='red'))
+ click.echo(click.style("sorry. {} is not a valid email. ".format(email), fg="red"))
return
account.email = new_email
db.session.commit()
- click.echo(click.style('Congratulations!, email has been reset.', fg='green'))
-
-
-@click.command('reset-encrypt-key-pair', help='Reset the asymmetric key pair of workspace for encrypt LLM credentials. '
- 'After the reset, all LLM credentials will become invalid, '
- 'requiring re-entry.'
- 'Only support SELF_HOSTED mode.')
-@click.confirmation_option(prompt=click.style('Are you sure you want to reset encrypt key pair?'
- ' this operation cannot be rolled back!', fg='red'))
+ click.echo(click.style("Congratulations!, email has been reset.", fg="green"))
+
+
+@click.command(
+ "reset-encrypt-key-pair",
+ help="Reset the asymmetric key pair of workspace for encrypt LLM credentials. "
+ "After the reset, all LLM credentials will become invalid, "
+    "requiring re-entry. "
+ "Only support SELF_HOSTED mode.",
+)
+@click.confirmation_option(
+ prompt=click.style(
+ "Are you sure you want to reset encrypt key pair?" " this operation cannot be rolled back!", fg="red"
+ )
+)
def reset_encrypt_key_pair():
"""
Reset the encrypted key pair of workspace for encrypt LLM credentials.
After the reset, all LLM credentials will become invalid, requiring re-entry.
Only support SELF_HOSTED mode.
"""
- if dify_config.EDITION != 'SELF_HOSTED':
- click.echo(click.style('Sorry, only support SELF_HOSTED mode.', fg='red'))
+ if dify_config.EDITION != "SELF_HOSTED":
+ click.echo(click.style("Sorry, only support SELF_HOSTED mode.", fg="red"))
return
tenants = db.session.query(Tenant).all()
for tenant in tenants:
if not tenant:
- click.echo(click.style('Sorry, no workspace found. Please enter /install to initialize.', fg='red'))
+ click.echo(click.style("Sorry, no workspace found. Please enter /install to initialize.", fg="red"))
return
tenant.encrypt_public_key = generate_key_pair(tenant.id)
- db.session.query(Provider).filter(Provider.provider_type == 'custom', Provider.tenant_id == tenant.id).delete()
+ db.session.query(Provider).filter(Provider.provider_type == "custom", Provider.tenant_id == tenant.id).delete()
db.session.query(ProviderModel).filter(ProviderModel.tenant_id == tenant.id).delete()
db.session.commit()
- click.echo(click.style('Congratulations! '
- 'the asymmetric key pair of workspace {} has been reset.'.format(tenant.id), fg='green'))
+ click.echo(
+ click.style(
+ "Congratulations! " "the asymmetric key pair of workspace {} has been reset.".format(tenant.id),
+ fg="green",
+ )
+ )
-@click.command('vdb-migrate', help='migrate vector db.')
-@click.option('--scope', default='all', prompt=False, help='The scope of vector database to migrate, Default is All.')
+@click.command("vdb-migrate", help="migrate vector db.")
+@click.option("--scope", default="all", prompt=False, help="The scope of vector database to migrate, Default is All.")
def vdb_migrate(scope: str):
- if scope in ['knowledge', 'all']:
+ if scope in ["knowledge", "all"]:
migrate_knowledge_vector_database()
- if scope in ['annotation', 'all']:
+ if scope in ["annotation", "all"]:
migrate_annotation_vector_database()
@@ -146,7 +150,7 @@ def migrate_annotation_vector_database():
"""
Migrate annotation datas to target vector database .
"""
- click.echo(click.style('Start migrate annotation data.', fg='green'))
+ click.echo(click.style("Start migrate annotation data.", fg="green"))
create_count = 0
skipped_count = 0
total_count = 0
@@ -154,98 +158,103 @@ def migrate_annotation_vector_database():
while True:
try:
# get apps info
- apps = db.session.query(App).filter(
- App.status == 'normal'
- ).order_by(App.created_at.desc()).paginate(page=page, per_page=50)
+ apps = (
+ db.session.query(App)
+ .filter(App.status == "normal")
+ .order_by(App.created_at.desc())
+ .paginate(page=page, per_page=50)
+ )
except NotFound:
break
page += 1
for app in apps:
total_count = total_count + 1
- click.echo(f'Processing the {total_count} app {app.id}. '
- + f'{create_count} created, {skipped_count} skipped.')
+ click.echo(
+ f"Processing the {total_count} app {app.id}. " + f"{create_count} created, {skipped_count} skipped."
+ )
try:
- click.echo('Create app annotation index: {}'.format(app.id))
- app_annotation_setting = db.session.query(AppAnnotationSetting).filter(
- AppAnnotationSetting.app_id == app.id
- ).first()
+ click.echo("Create app annotation index: {}".format(app.id))
+ app_annotation_setting = (
+ db.session.query(AppAnnotationSetting).filter(AppAnnotationSetting.app_id == app.id).first()
+ )
if not app_annotation_setting:
skipped_count = skipped_count + 1
- click.echo('App annotation setting is disabled: {}'.format(app.id))
+ click.echo("App annotation setting is disabled: {}".format(app.id))
continue
# get dataset_collection_binding info
- dataset_collection_binding = db.session.query(DatasetCollectionBinding).filter(
- DatasetCollectionBinding.id == app_annotation_setting.collection_binding_id
- ).first()
+ dataset_collection_binding = (
+ db.session.query(DatasetCollectionBinding)
+ .filter(DatasetCollectionBinding.id == app_annotation_setting.collection_binding_id)
+ .first()
+ )
if not dataset_collection_binding:
- click.echo('App annotation collection binding is not exist: {}'.format(app.id))
+ click.echo("App annotation collection binding is not exist: {}".format(app.id))
continue
annotations = db.session.query(MessageAnnotation).filter(MessageAnnotation.app_id == app.id).all()
dataset = Dataset(
id=app.id,
tenant_id=app.tenant_id,
- indexing_technique='high_quality',
+ indexing_technique="high_quality",
embedding_model_provider=dataset_collection_binding.provider_name,
embedding_model=dataset_collection_binding.model_name,
- collection_binding_id=dataset_collection_binding.id
+ collection_binding_id=dataset_collection_binding.id,
)
documents = []
if annotations:
for annotation in annotations:
document = Document(
page_content=annotation.question,
- metadata={
- "annotation_id": annotation.id,
- "app_id": app.id,
- "doc_id": annotation.id
- }
+ metadata={"annotation_id": annotation.id, "app_id": app.id, "doc_id": annotation.id},
)
documents.append(document)
- vector = Vector(dataset, attributes=['doc_id', 'annotation_id', 'app_id'])
+ vector = Vector(dataset, attributes=["doc_id", "annotation_id", "app_id"])
click.echo(f"Start to migrate annotation, app_id: {app.id}.")
try:
vector.delete()
- click.echo(
- click.style(f'Successfully delete vector index for app: {app.id}.',
- fg='green'))
+ click.echo(click.style(f"Successfully delete vector index for app: {app.id}.", fg="green"))
except Exception as e:
- click.echo(
- click.style(f'Failed to delete vector index for app {app.id}.',
- fg='red'))
+ click.echo(click.style(f"Failed to delete vector index for app {app.id}.", fg="red"))
raise e
if documents:
try:
- click.echo(click.style(
- f'Start to created vector index with {len(documents)} annotations for app {app.id}.',
- fg='green'))
- vector.create(documents)
click.echo(
- click.style(f'Successfully created vector index for app {app.id}.', fg='green'))
+ click.style(
+ f"Start to created vector index with {len(documents)} annotations for app {app.id}.",
+ fg="green",
+ )
+ )
+ vector.create(documents)
+ click.echo(click.style(f"Successfully created vector index for app {app.id}.", fg="green"))
except Exception as e:
- click.echo(click.style(f'Failed to created vector index for app {app.id}.', fg='red'))
+ click.echo(click.style(f"Failed to created vector index for app {app.id}.", fg="red"))
raise e
- click.echo(f'Successfully migrated app annotation {app.id}.')
+ click.echo(f"Successfully migrated app annotation {app.id}.")
create_count += 1
except Exception as e:
click.echo(
- click.style('Create app annotation index error: {} {}'.format(e.__class__.__name__, str(e)),
- fg='red'))
+ click.style(
+ "Create app annotation index error: {} {}".format(e.__class__.__name__, str(e)), fg="red"
+ )
+ )
continue
click.echo(
- click.style(f'Congratulations! Create {create_count} app annotation indexes, and skipped {skipped_count} apps.',
- fg='green'))
+ click.style(
+ f"Congratulations! Create {create_count} app annotation indexes, and skipped {skipped_count} apps.",
+ fg="green",
+ )
+ )
def migrate_knowledge_vector_database():
"""
Migrate vector database datas to target vector database .
"""
- click.echo(click.style('Start migrate vector db.', fg='green'))
+ click.echo(click.style("Start migrate vector db.", fg="green"))
create_count = 0
skipped_count = 0
total_count = 0
@@ -253,87 +262,77 @@ def migrate_knowledge_vector_database():
page = 1
while True:
try:
- datasets = db.session.query(Dataset).filter(Dataset.indexing_technique == 'high_quality') \
- .order_by(Dataset.created_at.desc()).paginate(page=page, per_page=50)
+ datasets = (
+ db.session.query(Dataset)
+ .filter(Dataset.indexing_technique == "high_quality")
+ .order_by(Dataset.created_at.desc())
+ .paginate(page=page, per_page=50)
+ )
except NotFound:
break
page += 1
for dataset in datasets:
total_count = total_count + 1
- click.echo(f'Processing the {total_count} dataset {dataset.id}. '
- + f'{create_count} created, {skipped_count} skipped.')
+ click.echo(
+ f"Processing the {total_count} dataset {dataset.id}. "
+ + f"{create_count} created, {skipped_count} skipped."
+ )
try:
- click.echo('Create dataset vdb index: {}'.format(dataset.id))
+ click.echo("Create dataset vdb index: {}".format(dataset.id))
if dataset.index_struct_dict:
- if dataset.index_struct_dict['type'] == vector_type:
+ if dataset.index_struct_dict["type"] == vector_type:
skipped_count = skipped_count + 1
continue
- collection_name = ''
+ collection_name = ""
if vector_type == VectorType.WEAVIATE:
dataset_id = dataset.id
collection_name = Dataset.gen_collection_name_by_id(dataset_id)
- index_struct_dict = {
- "type": VectorType.WEAVIATE,
- "vector_store": {"class_prefix": collection_name}
- }
+ index_struct_dict = {"type": VectorType.WEAVIATE, "vector_store": {"class_prefix": collection_name}}
dataset.index_struct = json.dumps(index_struct_dict)
elif vector_type == VectorType.QDRANT:
if dataset.collection_binding_id:
- dataset_collection_binding = db.session.query(DatasetCollectionBinding). \
- filter(DatasetCollectionBinding.id == dataset.collection_binding_id). \
- one_or_none()
+ dataset_collection_binding = (
+ db.session.query(DatasetCollectionBinding)
+ .filter(DatasetCollectionBinding.id == dataset.collection_binding_id)
+ .one_or_none()
+ )
if dataset_collection_binding:
collection_name = dataset_collection_binding.collection_name
else:
- raise ValueError('Dataset Collection Bindings is not exist!')
+ raise ValueError("Dataset Collection Bindings is not exist!")
else:
dataset_id = dataset.id
collection_name = Dataset.gen_collection_name_by_id(dataset_id)
- index_struct_dict = {
- "type": VectorType.QDRANT,
- "vector_store": {"class_prefix": collection_name}
- }
+ index_struct_dict = {"type": VectorType.QDRANT, "vector_store": {"class_prefix": collection_name}}
dataset.index_struct = json.dumps(index_struct_dict)
elif vector_type == VectorType.MILVUS:
dataset_id = dataset.id
collection_name = Dataset.gen_collection_name_by_id(dataset_id)
- index_struct_dict = {
- "type": VectorType.MILVUS,
- "vector_store": {"class_prefix": collection_name}
- }
+ index_struct_dict = {"type": VectorType.MILVUS, "vector_store": {"class_prefix": collection_name}}
dataset.index_struct = json.dumps(index_struct_dict)
elif vector_type == VectorType.RELYT:
dataset_id = dataset.id
collection_name = Dataset.gen_collection_name_by_id(dataset_id)
- index_struct_dict = {
- "type": 'relyt',
- "vector_store": {"class_prefix": collection_name}
- }
+ index_struct_dict = {"type": "relyt", "vector_store": {"class_prefix": collection_name}}
dataset.index_struct = json.dumps(index_struct_dict)
elif vector_type == VectorType.TENCENT:
dataset_id = dataset.id
collection_name = Dataset.gen_collection_name_by_id(dataset_id)
- index_struct_dict = {
- "type": VectorType.TENCENT,
- "vector_store": {"class_prefix": collection_name}
- }
+ index_struct_dict = {"type": VectorType.TENCENT, "vector_store": {"class_prefix": collection_name}}
dataset.index_struct = json.dumps(index_struct_dict)
elif vector_type == VectorType.PGVECTOR:
dataset_id = dataset.id
collection_name = Dataset.gen_collection_name_by_id(dataset_id)
- index_struct_dict = {
- "type": VectorType.PGVECTOR,
- "vector_store": {"class_prefix": collection_name}
- }
+ index_struct_dict = {"type": VectorType.PGVECTOR, "vector_store": {"class_prefix": collection_name}}
dataset.index_struct = json.dumps(index_struct_dict)
elif vector_type == VectorType.OPENSEARCH:
dataset_id = dataset.id
collection_name = Dataset.gen_collection_name_by_id(dataset_id)
index_struct_dict = {
"type": VectorType.OPENSEARCH,
- "vector_store": {"class_prefix": collection_name}
+ "vector_store": {"class_prefix": collection_name},
}
dataset.index_struct = json.dumps(index_struct_dict)
elif vector_type == VectorType.ANALYTICDB:
@@ -341,9 +340,14 @@ def migrate_knowledge_vector_database():
collection_name = Dataset.gen_collection_name_by_id(dataset_id)
index_struct_dict = {
"type": VectorType.ANALYTICDB,
- "vector_store": {"class_prefix": collection_name}
+ "vector_store": {"class_prefix": collection_name},
}
dataset.index_struct = json.dumps(index_struct_dict)
+ elif vector_type == VectorType.ELASTICSEARCH:
+ dataset_id = dataset.id
+ index_name = Dataset.gen_collection_name_by_id(dataset_id)
+ index_struct_dict = {"type": "elasticsearch", "vector_store": {"class_prefix": index_name}}
+ dataset.index_struct = json.dumps(index_struct_dict)
else:
raise ValueError(f"Vector store {vector_type} is not supported.")
@@ -353,29 +357,41 @@ def migrate_knowledge_vector_database():
try:
vector.delete()
click.echo(
- click.style(f'Successfully delete vector index {collection_name} for dataset {dataset.id}.',
- fg='green'))
+ click.style(
+ f"Successfully delete vector index {collection_name} for dataset {dataset.id}.", fg="green"
+ )
+ )
except Exception as e:
click.echo(
- click.style(f'Failed to delete vector index {collection_name} for dataset {dataset.id}.',
- fg='red'))
+ click.style(
+ f"Failed to delete vector index {collection_name} for dataset {dataset.id}.", fg="red"
+ )
+ )
raise e
- dataset_documents = db.session.query(DatasetDocument).filter(
- DatasetDocument.dataset_id == dataset.id,
- DatasetDocument.indexing_status == 'completed',
- DatasetDocument.enabled == True,
- DatasetDocument.archived == False,
- ).all()
+ dataset_documents = (
+ db.session.query(DatasetDocument)
+ .filter(
+ DatasetDocument.dataset_id == dataset.id,
+ DatasetDocument.indexing_status == "completed",
+ DatasetDocument.enabled == True,
+ DatasetDocument.archived == False,
+ )
+ .all()
+ )
documents = []
segments_count = 0
for dataset_document in dataset_documents:
- segments = db.session.query(DocumentSegment).filter(
- DocumentSegment.document_id == dataset_document.id,
- DocumentSegment.status == 'completed',
- DocumentSegment.enabled == True
- ).all()
+ segments = (
+ db.session.query(DocumentSegment)
+ .filter(
+ DocumentSegment.document_id == dataset_document.id,
+ DocumentSegment.status == "completed",
+ DocumentSegment.enabled == True,
+ )
+ .all()
+ )
for segment in segments:
document = Document(
@@ -385,7 +401,7 @@ def migrate_knowledge_vector_database():
"doc_hash": segment.index_node_hash,
"document_id": segment.document_id,
"dataset_id": segment.dataset_id,
- }
+ },
)
documents.append(document)
@@ -393,37 +409,43 @@ def migrate_knowledge_vector_database():
if documents:
try:
- click.echo(click.style(
- f'Start to created vector index with {len(documents)} documents of {segments_count} segments for dataset {dataset.id}.',
- fg='green'))
+ click.echo(
+ click.style(
+ f"Start to created vector index with {len(documents)} documents of {segments_count} segments for dataset {dataset.id}.",
+ fg="green",
+ )
+ )
vector.create(documents)
click.echo(
- click.style(f'Successfully created vector index for dataset {dataset.id}.', fg='green'))
+ click.style(f"Successfully created vector index for dataset {dataset.id}.", fg="green")
+ )
except Exception as e:
- click.echo(click.style(f'Failed to created vector index for dataset {dataset.id}.', fg='red'))
+ click.echo(click.style(f"Failed to created vector index for dataset {dataset.id}.", fg="red"))
raise e
db.session.add(dataset)
db.session.commit()
- click.echo(f'Successfully migrated dataset {dataset.id}.')
+ click.echo(f"Successfully migrated dataset {dataset.id}.")
create_count += 1
except Exception as e:
db.session.rollback()
click.echo(
- click.style('Create dataset index error: {} {}'.format(e.__class__.__name__, str(e)),
- fg='red'))
+ click.style("Create dataset index error: {} {}".format(e.__class__.__name__, str(e)), fg="red")
+ )
continue
click.echo(
- click.style(f'Congratulations! Create {create_count} dataset indexes, and skipped {skipped_count} datasets.',
- fg='green'))
+ click.style(
+ f"Congratulations! Create {create_count} dataset indexes, and skipped {skipped_count} datasets.", fg="green"
+ )
+ )
-@click.command('convert-to-agent-apps', help='Convert Agent Assistant to Agent App.')
+@click.command("convert-to-agent-apps", help="Convert Agent Assistant to Agent App.")
def convert_to_agent_apps():
"""
Convert Agent Assistant to Agent App.
"""
- click.echo(click.style('Start convert to agent apps.', fg='green'))
+ click.echo(click.style("Start convert to agent apps.", fg="green"))
proceeded_app_ids = []
@@ -458,7 +480,7 @@ def convert_to_agent_apps():
break
for app in apps:
- click.echo('Converting app: {}'.format(app.id))
+ click.echo("Converting app: {}".format(app.id))
try:
app.mode = AppMode.AGENT_CHAT.value
@@ -470,137 +492,139 @@ def convert_to_agent_apps():
)
db.session.commit()
- click.echo(click.style('Converted app: {}'.format(app.id), fg='green'))
+ click.echo(click.style("Converted app: {}".format(app.id), fg="green"))
except Exception as e:
- click.echo(
- click.style('Convert app error: {} {}'.format(e.__class__.__name__,
- str(e)), fg='red'))
+ click.echo(click.style("Convert app error: {} {}".format(e.__class__.__name__, str(e)), fg="red"))
- click.echo(click.style('Congratulations! Converted {} agent apps.'.format(len(proceeded_app_ids)), fg='green'))
+ click.echo(click.style("Congratulations! Converted {} agent apps.".format(len(proceeded_app_ids)), fg="green"))
-@click.command('add-qdrant-doc-id-index', help='add qdrant doc_id index.')
-@click.option('--field', default='metadata.doc_id', prompt=False, help='index field , default is metadata.doc_id.')
+@click.command("add-qdrant-doc-id-index", help="add qdrant doc_id index.")
+@click.option("--field", default="metadata.doc_id", prompt=False, help="index field , default is metadata.doc_id.")
def add_qdrant_doc_id_index(field: str):
- click.echo(click.style('Start add qdrant doc_id index.', fg='green'))
+ click.echo(click.style("Start add qdrant doc_id index.", fg="green"))
vector_type = dify_config.VECTOR_STORE
if vector_type != "qdrant":
- click.echo(click.style('Sorry, only support qdrant vector store.', fg='red'))
+ click.echo(click.style("Sorry, only support qdrant vector store.", fg="red"))
return
create_count = 0
try:
bindings = db.session.query(DatasetCollectionBinding).all()
if not bindings:
- click.echo(click.style('Sorry, no dataset collection bindings found.', fg='red'))
+ click.echo(click.style("Sorry, no dataset collection bindings found.", fg="red"))
return
import qdrant_client
from qdrant_client.http.exceptions import UnexpectedResponse
from qdrant_client.http.models import PayloadSchemaType
from core.rag.datasource.vdb.qdrant.qdrant_vector import QdrantConfig
+
for binding in bindings:
if dify_config.QDRANT_URL is None:
- raise ValueError('Qdrant url is required.')
+ raise ValueError("Qdrant url is required.")
qdrant_config = QdrantConfig(
endpoint=dify_config.QDRANT_URL,
api_key=dify_config.QDRANT_API_KEY,
root_path=current_app.root_path,
timeout=dify_config.QDRANT_CLIENT_TIMEOUT,
grpc_port=dify_config.QDRANT_GRPC_PORT,
- prefer_grpc=dify_config.QDRANT_GRPC_ENABLED
+ prefer_grpc=dify_config.QDRANT_GRPC_ENABLED,
)
try:
client = qdrant_client.QdrantClient(**qdrant_config.to_qdrant_params())
# create payload index
- client.create_payload_index(binding.collection_name, field,
- field_schema=PayloadSchemaType.KEYWORD)
+ client.create_payload_index(binding.collection_name, field, field_schema=PayloadSchemaType.KEYWORD)
create_count += 1
except UnexpectedResponse as e:
# Collection does not exist, so return
if e.status_code == 404:
- click.echo(click.style(f'Collection not found, collection_name:{binding.collection_name}.', fg='red'))
+ click.echo(
+ click.style(f"Collection not found, collection_name:{binding.collection_name}.", fg="red")
+ )
continue
# Some other error occurred, so re-raise the exception
else:
- click.echo(click.style(f'Failed to create qdrant index, collection_name:{binding.collection_name}.', fg='red'))
+ click.echo(
+ click.style(
+ f"Failed to create qdrant index, collection_name:{binding.collection_name}.", fg="red"
+ )
+ )
except Exception as e:
- click.echo(click.style('Failed to create qdrant client.', fg='red'))
+ click.echo(click.style("Failed to create qdrant client.", fg="red"))
- click.echo(
- click.style(f'Congratulations! Create {create_count} collection indexes.',
- fg='green'))
+ click.echo(click.style(f"Congratulations! Create {create_count} collection indexes.", fg="green"))
-@click.command('create-tenant', help='Create account and tenant.')
-@click.option('--email', prompt=True, help='The email address of the tenant account.')
-@click.option('--language', prompt=True, help='Account language, default: en-US.')
+@click.command("create-tenant", help="Create account and tenant.")
+@click.option("--email", prompt=True, help="The email address of the tenant account.")
+@click.option("--language", prompt=True, help="Account language, default: en-US.")
def create_tenant(email: str, language: Optional[str] = None):
"""
Create tenant account
"""
if not email:
- click.echo(click.style('Sorry, email is required.', fg='red'))
+ click.echo(click.style("Sorry, email is required.", fg="red"))
return
# Create account
email = email.strip()
- if '@' not in email:
- click.echo(click.style('Sorry, invalid email address.', fg='red'))
+ if "@" not in email:
+ click.echo(click.style("Sorry, invalid email address.", fg="red"))
return
- account_name = email.split('@')[0]
+ account_name = email.split("@")[0]
if language not in languages:
- language = 'en-US'
+ language = "en-US"
# generate random password
new_password = secrets.token_urlsafe(16)
# register account
- account = RegisterService.register(
- email=email,
- name=account_name,
- password=new_password,
- language=language
- )
+ account = RegisterService.register(email=email, name=account_name, password=new_password, language=language)
TenantService.create_owner_tenant_if_not_exist(account)
- click.echo(click.style('Congratulations! Account and tenant created.\n'
- 'Account: {}\nPassword: {}'.format(email, new_password), fg='green'))
+ click.echo(
+ click.style(
+ "Congratulations! Account and tenant created.\n" "Account: {}\nPassword: {}".format(email, new_password),
+ fg="green",
+ )
+ )
-@click.command('upgrade-db', help='upgrade the database')
+@click.command("upgrade-db", help="upgrade the database")
def upgrade_db():
- click.echo('Preparing database migration...')
- lock = redis_client.lock(name='db_upgrade_lock', timeout=60)
+ click.echo("Preparing database migration...")
+ lock = redis_client.lock(name="db_upgrade_lock", timeout=60)
if lock.acquire(blocking=False):
try:
- click.echo(click.style('Start database migration.', fg='green'))
+ click.echo(click.style("Start database migration.", fg="green"))
# run db migration
import flask_migrate
+
flask_migrate.upgrade()
- click.echo(click.style('Database migration successful!', fg='green'))
+ click.echo(click.style("Database migration successful!", fg="green"))
except Exception as e:
- logging.exception(f'Database migration failed, error: {e}')
+ logging.exception(f"Database migration failed, error: {e}")
finally:
lock.release()
else:
- click.echo('Database migration skipped')
+ click.echo("Database migration skipped")
-@click.command('fix-app-site-missing', help='Fix app related site missing issue.')
+@click.command("fix-app-site-missing", help="Fix app related site missing issue.")
def fix_app_site_missing():
"""
Fix app related site missing issue.
"""
- click.echo(click.style('Start fix app related site missing issue.', fg='green'))
+ click.echo(click.style("Start fix app related site missing issue.", fg="green"))
failed_app_ids = []
while True:
@@ -631,15 +655,14 @@ def fix_app_site_missing():
app_was_created.send(app, account=account)
except Exception as e:
failed_app_ids.append(app_id)
- click.echo(click.style('Fix app {} related site missing issue failed!'.format(app_id), fg='red'))
- logging.exception(f'Fix app related site missing issue failed, error: {e}')
+ click.echo(click.style("Fix app {} related site missing issue failed!".format(app_id), fg="red"))
+ logging.exception(f"Fix app related site missing issue failed, error: {e}")
continue
if not processed_count:
break
-
- click.echo(click.style('Congratulations! Fix app related site missing issue successful!', fg='green'))
+ click.echo(click.style("Congratulations! Fix app related site missing issue successful!", fg="green"))
def register_commands(app):
diff --git a/api/configs/app_config.py b/api/configs/app_config.py
index a5a4fc788d0d19..b277760edd7b2c 100644
--- a/api/configs/app_config.py
+++ b/api/configs/app_config.py
@@ -12,19 +12,14 @@
class DifyConfig(
# Packaging info
PackagingInfo,
-
# Deployment configs
DeploymentConfig,
-
# Feature configs
FeatureConfig,
-
# Middleware configs
MiddlewareConfig,
-
# Extra service configs
ExtraServiceConfig,
-
# Enterprise feature configs
# **Before using, please contact business@dify.ai by email to inquire about licensing matters.**
EnterpriseFeatureConfig,
@@ -36,7 +31,6 @@ class DifyConfig(
env_file='.env',
env_file_encoding='utf-8',
frozen=True,
-
# ignore extra attributes
extra='ignore',
)
@@ -67,3 +61,5 @@ def HTTP_REQUEST_NODE_READABLE_MAX_TEXT_SIZE(self) -> str:
SSRF_PROXY_HTTPS_URL: str | None = None
MODERATION_BUFFER_SIZE: int = Field(default=300, description='The buffer size for moderation.')
+
+ MAX_VARIABLE_SIZE: int = Field(default=5 * 1024, description='The maximum size of a variable. default is 5KB.')
diff --git a/api/configs/feature/__init__.py b/api/configs/feature/__init__.py
index 369b25d788a440..ce59a281bcb4c1 100644
--- a/api/configs/feature/__init__.py
+++ b/api/configs/feature/__init__.py
@@ -406,6 +406,7 @@ class DataSetConfig(BaseSettings):
default=False,
)
+
class WorkspaceConfig(BaseSettings):
"""
Workspace configs
@@ -442,6 +443,63 @@ class CeleryBeatConfig(BaseSettings):
)
+class PositionConfig(BaseSettings):
+
+ POSITION_PROVIDER_PINS: str = Field(
+ description='The heads of model providers',
+ default='',
+ )
+
+ POSITION_PROVIDER_INCLUDES: str = Field(
+ description='The included model providers',
+ default='',
+ )
+
+ POSITION_PROVIDER_EXCLUDES: str = Field(
+ description='The excluded model providers',
+ default='',
+ )
+
+ POSITION_TOOL_PINS: str = Field(
+ description='The heads of tools',
+ default='',
+ )
+
+ POSITION_TOOL_INCLUDES: str = Field(
+ description='The included tools',
+ default='',
+ )
+
+ POSITION_TOOL_EXCLUDES: str = Field(
+ description='The excluded tools',
+ default='',
+ )
+
+ @computed_field
+ def POSITION_PROVIDER_PINS_LIST(self) -> list[str]:
+ return [item.strip() for item in self.POSITION_PROVIDER_PINS.split(',') if item.strip() != '']
+
+ @computed_field
+ def POSITION_PROVIDER_INCLUDES_LIST(self) -> list[str]:
+ return [item.strip() for item in self.POSITION_PROVIDER_INCLUDES.split(',') if item.strip() != '']
+
+ @computed_field
+ def POSITION_PROVIDER_EXCLUDES_LIST(self) -> list[str]:
+ return [item.strip() for item in self.POSITION_PROVIDER_EXCLUDES.split(',') if item.strip() != '']
+
+ @computed_field
+ def POSITION_TOOL_PINS_LIST(self) -> list[str]:
+ return [item.strip() for item in self.POSITION_TOOL_PINS.split(',') if item.strip() != '']
+
+ @computed_field
+ def POSITION_TOOL_INCLUDES_LIST(self) -> list[str]:
+ return [item.strip() for item in self.POSITION_TOOL_INCLUDES.split(',') if item.strip() != '']
+
+ @computed_field
+ def POSITION_TOOL_EXCLUDES_LIST(self) -> list[str]:
+ return [item.strip() for item in self.POSITION_TOOL_EXCLUDES.split(',') if item.strip() != '']
+
+
class FeatureConfig(
# place the configs in alphabet order
AppExecutionConfig,
@@ -466,6 +524,7 @@ class FeatureConfig(
UpdateConfig,
WorkflowConfig,
WorkspaceConfig,
+ PositionConfig,
# hosted services config
HostedServiceConfig,
diff --git a/api/configs/packaging/__init__.py b/api/configs/packaging/__init__.py
index 1104e298b1e82c..247fcde655a180 100644
--- a/api/configs/packaging/__init__.py
+++ b/api/configs/packaging/__init__.py
@@ -9,7 +9,7 @@ class PackagingInfo(BaseSettings):
CURRENT_VERSION: str = Field(
description='Dify version',
- default='0.6.16',
+ default='0.7.0',
)
COMMIT_SHA: str = Field(
diff --git a/api/constants/__init__.py b/api/constants/__init__.py
index 08a27869948c95..e22c3268ef428b 100644
--- a/api/constants/__init__.py
+++ b/api/constants/__init__.py
@@ -1,2 +1 @@
-# TODO: Update all string in code to use this constant
-HIDDEN_VALUE = '[__HIDDEN__]'
\ No newline at end of file
+HIDDEN_VALUE = "[__HIDDEN__]"
diff --git a/api/constants/languages.py b/api/constants/languages.py
index 023d2f18a6f08a..524dc61b5790a4 100644
--- a/api/constants/languages.py
+++ b/api/constants/languages.py
@@ -1,21 +1,22 @@
language_timezone_mapping = {
- 'en-US': 'America/New_York',
- 'zh-Hans': 'Asia/Shanghai',
- 'zh-Hant': 'Asia/Taipei',
- 'pt-BR': 'America/Sao_Paulo',
- 'es-ES': 'Europe/Madrid',
- 'fr-FR': 'Europe/Paris',
- 'de-DE': 'Europe/Berlin',
- 'ja-JP': 'Asia/Tokyo',
- 'ko-KR': 'Asia/Seoul',
- 'ru-RU': 'Europe/Moscow',
- 'it-IT': 'Europe/Rome',
- 'uk-UA': 'Europe/Kyiv',
- 'vi-VN': 'Asia/Ho_Chi_Minh',
- 'ro-RO': 'Europe/Bucharest',
- 'pl-PL': 'Europe/Warsaw',
- 'hi-IN': 'Asia/Kolkata',
- 'tr-TR': 'Europe/Istanbul',
+ "en-US": "America/New_York",
+ "zh-Hans": "Asia/Shanghai",
+ "zh-Hant": "Asia/Taipei",
+ "pt-BR": "America/Sao_Paulo",
+ "es-ES": "Europe/Madrid",
+ "fr-FR": "Europe/Paris",
+ "de-DE": "Europe/Berlin",
+ "ja-JP": "Asia/Tokyo",
+ "ko-KR": "Asia/Seoul",
+ "ru-RU": "Europe/Moscow",
+ "it-IT": "Europe/Rome",
+ "uk-UA": "Europe/Kyiv",
+ "vi-VN": "Asia/Ho_Chi_Minh",
+ "ro-RO": "Europe/Bucharest",
+ "pl-PL": "Europe/Warsaw",
+ "hi-IN": "Asia/Kolkata",
+ "tr-TR": "Europe/Istanbul",
+ "fa-IR": "Asia/Tehran",
}
languages = list(language_timezone_mapping.keys())
@@ -25,6 +26,5 @@ def supported_language(lang):
if lang in languages:
return lang
- error = ('{lang} is not a valid language.'
- .format(lang=lang))
+ error = "{lang} is not a valid language.".format(lang=lang)
raise ValueError(error)
diff --git a/api/constants/model_template.py b/api/constants/model_template.py
index cc5a37025479fd..7e1a196356c4e2 100644
--- a/api/constants/model_template.py
+++ b/api/constants/model_template.py
@@ -5,82 +5,79 @@
default_app_templates = {
# workflow default mode
AppMode.WORKFLOW: {
- 'app': {
- 'mode': AppMode.WORKFLOW.value,
- 'enable_site': True,
- 'enable_api': True
+ "app": {
+ "mode": AppMode.WORKFLOW.value,
+ "enable_site": True,
+ "enable_api": True,
}
},
-
# completion default mode
AppMode.COMPLETION: {
- 'app': {
- 'mode': AppMode.COMPLETION.value,
- 'enable_site': True,
- 'enable_api': True
+ "app": {
+ "mode": AppMode.COMPLETION.value,
+ "enable_site": True,
+ "enable_api": True,
},
- 'model_config': {
- 'model': {
+ "model_config": {
+ "model": {
"provider": "openai",
"name": "gpt-4o",
"mode": "chat",
- "completion_params": {}
+ "completion_params": {},
},
- 'user_input_form': json.dumps([
- {
- "paragraph": {
- "label": "Query",
- "variable": "query",
- "required": True,
- "default": ""
- }
- }
- ]),
- 'pre_prompt': '{{query}}'
+ "user_input_form": json.dumps(
+ [
+ {
+ "paragraph": {
+ "label": "Query",
+ "variable": "query",
+ "required": True,
+ "default": "",
+ },
+ },
+ ]
+ ),
+ "pre_prompt": "{{query}}",
},
-
},
-
# chat default mode
AppMode.CHAT: {
- 'app': {
- 'mode': AppMode.CHAT.value,
- 'enable_site': True,
- 'enable_api': True
+ "app": {
+ "mode": AppMode.CHAT.value,
+ "enable_site": True,
+ "enable_api": True,
},
- 'model_config': {
- 'model': {
+ "model_config": {
+ "model": {
"provider": "openai",
"name": "gpt-4o",
"mode": "chat",
- "completion_params": {}
- }
- }
+ "completion_params": {},
+ },
+ },
},
-
# advanced-chat default mode
AppMode.ADVANCED_CHAT: {
- 'app': {
- 'mode': AppMode.ADVANCED_CHAT.value,
- 'enable_site': True,
- 'enable_api': True
- }
+ "app": {
+ "mode": AppMode.ADVANCED_CHAT.value,
+ "enable_site": True,
+ "enable_api": True,
+ },
},
-
# agent-chat default mode
AppMode.AGENT_CHAT: {
- 'app': {
- 'mode': AppMode.AGENT_CHAT.value,
- 'enable_site': True,
- 'enable_api': True
+ "app": {
+ "mode": AppMode.AGENT_CHAT.value,
+ "enable_site": True,
+ "enable_api": True,
},
- 'model_config': {
- 'model': {
+ "model_config": {
+ "model": {
"provider": "openai",
"name": "gpt-4o",
"mode": "chat",
- "completion_params": {}
- }
- }
- }
+ "completion_params": {},
+ },
+ },
+ },
}
diff --git a/api/contexts/__init__.py b/api/contexts/__init__.py
index 306fac3a931298..623a1a28eb731e 100644
--- a/api/contexts/__init__.py
+++ b/api/contexts/__init__.py
@@ -1,3 +1,7 @@
from contextvars import ContextVar
-tenant_id: ContextVar[str] = ContextVar('tenant_id')
\ No newline at end of file
+from core.workflow.entities.variable_pool import VariablePool
+
+tenant_id: ContextVar[str] = ContextVar("tenant_id")
+
+workflow_variable_pool: ContextVar[VariablePool] = ContextVar("workflow_variable_pool")
diff --git a/api/controllers/console/__init__.py b/api/controllers/console/__init__.py
index bef40bea7eb32e..b2b9d8d4967927 100644
--- a/api/controllers/console/__init__.py
+++ b/api/controllers/console/__init__.py
@@ -17,6 +17,7 @@
audio,
completion,
conversation,
+ conversation_variables,
generator,
message,
model_config,
diff --git a/api/controllers/console/app/conversation_variables.py b/api/controllers/console/app/conversation_variables.py
new file mode 100644
index 00000000000000..aa0722ea355ca2
--- /dev/null
+++ b/api/controllers/console/app/conversation_variables.py
@@ -0,0 +1,61 @@
+from flask_restful import Resource, marshal_with, reqparse
+from sqlalchemy import select
+from sqlalchemy.orm import Session
+
+from controllers.console import api
+from controllers.console.app.wraps import get_app_model
+from controllers.console.setup import setup_required
+from controllers.console.wraps import account_initialization_required
+from extensions.ext_database import db
+from fields.conversation_variable_fields import paginated_conversation_variable_fields
+from libs.login import login_required
+from models import ConversationVariable
+from models.model import AppMode
+
+
+class ConversationVariablesApi(Resource):
+ @setup_required
+ @login_required
+ @account_initialization_required
+ @get_app_model(mode=AppMode.ADVANCED_CHAT)
+ @marshal_with(paginated_conversation_variable_fields)
+ def get(self, app_model):
+ parser = reqparse.RequestParser()
+ parser.add_argument('conversation_id', type=str, location='args')
+ args = parser.parse_args()
+
+ stmt = (
+ select(ConversationVariable)
+ .where(ConversationVariable.app_id == app_model.id)
+ .order_by(ConversationVariable.created_at)
+ )
+ if args['conversation_id']:
+ stmt = stmt.where(ConversationVariable.conversation_id == args['conversation_id'])
+ else:
+ raise ValueError('conversation_id is required')
+
+ # NOTE: This is a temporary solution to avoid performance issues.
+ page = 1
+ page_size = 100
+ stmt = stmt.limit(page_size).offset((page - 1) * page_size)
+
+ with Session(db.engine) as session:
+ rows = session.scalars(stmt).all()
+
+ return {
+ 'page': page,
+ 'limit': page_size,
+ 'total': len(rows),
+ 'has_more': False,
+ 'data': [
+ {
+ 'created_at': row.created_at,
+ 'updated_at': row.updated_at,
+ **row.to_variable().model_dump(),
+ }
+ for row in rows
+ ],
+ }
+
+
+api.add_resource(ConversationVariablesApi, '/apps/<uuid:app_id>/conversation-variables')
diff --git a/api/controllers/console/app/workflow.py b/api/controllers/console/app/workflow.py
index 686ef7b4bebaaa..6eb97b6c817916 100644
--- a/api/controllers/console/app/workflow.py
+++ b/api/controllers/console/app/workflow.py
@@ -74,6 +74,7 @@ def post(self, app_model: App):
parser.add_argument('hash', type=str, required=False, location='json')
# TODO: set this to required=True after frontend is updated
parser.add_argument('environment_variables', type=list, required=False, location='json')
+ parser.add_argument('conversation_variables', type=list, required=False, location='json')
args = parser.parse_args()
elif 'text/plain' in content_type:
try:
@@ -88,7 +89,8 @@ def post(self, app_model: App):
'graph': data.get('graph'),
'features': data.get('features'),
'hash': data.get('hash'),
- 'environment_variables': data.get('environment_variables')
+ 'environment_variables': data.get('environment_variables'),
+ 'conversation_variables': data.get('conversation_variables'),
}
except json.JSONDecodeError:
return {'message': 'Invalid JSON data'}, 400
@@ -100,6 +102,8 @@ def post(self, app_model: App):
try:
environment_variables_list = args.get('environment_variables') or []
environment_variables = [factory.build_variable_from_mapping(obj) for obj in environment_variables_list]
+ conversation_variables_list = args.get('conversation_variables') or []
+ conversation_variables = [factory.build_variable_from_mapping(obj) for obj in conversation_variables_list]
workflow = workflow_service.sync_draft_workflow(
app_model=app_model,
graph=args['graph'],
@@ -107,6 +111,7 @@ def post(self, app_model: App):
unique_hash=args.get('hash'),
account=current_user,
environment_variables=environment_variables,
+ conversation_variables=conversation_variables,
)
except WorkflowHashNotEqualError:
raise DraftWorkflowNotSync()
diff --git a/api/controllers/console/datasets/datasets.py b/api/controllers/console/datasets/datasets.py
index 3e9884328029ce..a5bc2dd86a905d 100644
--- a/api/controllers/console/datasets/datasets.py
+++ b/api/controllers/console/datasets/datasets.py
@@ -555,7 +555,7 @@ def get(self):
RetrievalMethod.SEMANTIC_SEARCH.value
]
}
- case VectorType.QDRANT | VectorType.WEAVIATE | VectorType.OPENSEARCH | VectorType.ANALYTICDB | VectorType.MYSCALE | VectorType.ORACLE:
+ case VectorType.QDRANT | VectorType.WEAVIATE | VectorType.OPENSEARCH | VectorType.ANALYTICDB | VectorType.MYSCALE | VectorType.ORACLE | VectorType.ELASTICSEARCH:
return {
'retrieval_method': [
RetrievalMethod.SEMANTIC_SEARCH.value,
@@ -579,7 +579,7 @@ def get(self, vector_type):
RetrievalMethod.SEMANTIC_SEARCH.value
]
}
- case VectorType.QDRANT | VectorType.WEAVIATE | VectorType.OPENSEARCH| VectorType.ANALYTICDB | VectorType.MYSCALE | VectorType.ORACLE:
+ case VectorType.QDRANT | VectorType.WEAVIATE | VectorType.OPENSEARCH | VectorType.ANALYTICDB | VectorType.MYSCALE | VectorType.ORACLE | VectorType.ELASTICSEARCH:
return {
'retrieval_method': [
RetrievalMethod.SEMANTIC_SEARCH.value,
diff --git a/api/controllers/console/datasets/datasets_document.py b/api/controllers/console/datasets/datasets_document.py
index afe0ca7c69b2b7..976b97660ae293 100644
--- a/api/controllers/console/datasets/datasets_document.py
+++ b/api/controllers/console/datasets/datasets_document.py
@@ -178,11 +178,20 @@ def get(self, dataset_id):
.subquery()
query = query.outerjoin(sub_query, sub_query.c.document_id == Document.id) \
- .order_by(sort_logic(db.func.coalesce(sub_query.c.total_hit_count, 0)))
+ .order_by(
+ sort_logic(db.func.coalesce(sub_query.c.total_hit_count, 0)),
+ sort_logic(Document.position),
+ )
elif sort == 'created_at':
- query = query.order_by(sort_logic(Document.created_at))
+ query = query.order_by(
+ sort_logic(Document.created_at),
+ sort_logic(Document.position),
+ )
else:
- query = query.order_by(desc(Document.created_at))
+ query = query.order_by(
+ desc(Document.created_at),
+ desc(Document.position),
+ )
paginated_documents = query.paginate(
page=page, per_page=limit, max_per_page=100, error_out=False)
diff --git a/api/controllers/console/extension.py b/api/controllers/console/extension.py
index fa73c44c220a06..fe73bcb98572a5 100644
--- a/api/controllers/console/extension.py
+++ b/api/controllers/console/extension.py
@@ -1,6 +1,7 @@
from flask_login import current_user
from flask_restful import Resource, marshal_with, reqparse
+from constants import HIDDEN_VALUE
from controllers.console import api
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
@@ -89,7 +90,7 @@ def post(self, id):
extension_data_from_db.name = args['name']
extension_data_from_db.api_endpoint = args['api_endpoint']
- if args['api_key'] != '[__HIDDEN__]':
+ if args['api_key'] != HIDDEN_VALUE:
extension_data_from_db.api_key = args['api_key']
return APIBasedExtensionService.save(extension_data_from_db)
diff --git a/api/controllers/service_api/app/message.py b/api/controllers/service_api/app/message.py
index c8b44cfa38114c..875870e667c8d9 100644
--- a/api/controllers/service_api/app/message.py
+++ b/api/controllers/service_api/app/message.py
@@ -131,7 +131,7 @@ def get(self, app_model: App, end_user: EndUser, message_id):
except services.errors.message.MessageNotExistsError:
raise NotFound("Message Not Exists.")
except SuggestedQuestionsAfterAnswerDisabledError:
- raise BadRequest("Message Not Exists.")
+ raise BadRequest("Suggested Questions Is Disabled.")
except Exception:
logging.exception("internal server error.")
raise InternalServerError()
diff --git a/api/core/app/app_config/easy_ui_based_app/dataset/manager.py b/api/core/app/app_config/easy_ui_based_app/dataset/manager.py
index ec17db5f06a30c..f4e6675bd44435 100644
--- a/api/core/app/app_config/easy_ui_based_app/dataset/manager.py
+++ b/api/core/app/app_config/easy_ui_based_app/dataset/manager.py
@@ -93,6 +93,7 @@ def convert(cls, config: dict) -> Optional[DatasetEntity]:
reranking_model=dataset_configs.get('reranking_model'),
weights=dataset_configs.get('weights'),
reranking_enabled=dataset_configs.get('reranking_enabled', True),
+ rerank_mode=dataset_configs["reranking_mode"],
)
)
diff --git a/api/core/app/app_config/entities.py b/api/core/app/app_config/entities.py
index a490ddd67089f4..05a42a898e4af7 100644
--- a/api/core/app/app_config/entities.py
+++ b/api/core/app/app_config/entities.py
@@ -3,8 +3,9 @@
from pydantic import BaseModel
+from core.file.file_obj import FileExtraConfig
from core.model_runtime.entities.message_entities import PromptMessageRole
-from models.model import AppMode
+from models import AppMode
class ModelConfigEntity(BaseModel):
@@ -200,11 +201,6 @@ class TracingConfigEntity(BaseModel):
tracing_provider: str
-class FileExtraConfig(BaseModel):
- """
- File Upload Entity.
- """
- image_config: Optional[dict[str, Any]] = None
class AppAdditionalFeatures(BaseModel):
diff --git a/api/core/app/app_config/features/file_upload/manager.py b/api/core/app/app_config/features/file_upload/manager.py
index 86799fb1abe133..3da3c2eddb83f3 100644
--- a/api/core/app/app_config/features/file_upload/manager.py
+++ b/api/core/app/app_config/features/file_upload/manager.py
@@ -1,7 +1,7 @@
from collections.abc import Mapping
from typing import Any, Optional
-from core.app.app_config.entities import FileExtraConfig
+from core.file.file_obj import FileExtraConfig
class FileUploadConfigManager:
diff --git a/api/core/app/apps/advanced_chat/app_generator.py b/api/core/app/apps/advanced_chat/app_generator.py
index 0141dbec58de6e..351eb05d8ad41c 100644
--- a/api/core/app/apps/advanced_chat/app_generator.py
+++ b/api/core/app/apps/advanced_chat/app_generator.py
@@ -8,6 +8,8 @@
from flask import Flask, current_app
from pydantic import ValidationError
+from sqlalchemy import select
+from sqlalchemy.orm import Session
import contexts
from core.app.app_config.features.file_upload.manager import FileUploadConfigManager
@@ -18,15 +20,20 @@
from core.app.apps.base_app_queue_manager import AppQueueManager, GenerateTaskStoppedException, PublishFrom
from core.app.apps.message_based_app_generator import MessageBasedAppGenerator
from core.app.apps.message_based_app_queue_manager import MessageBasedAppQueueManager
-from core.app.entities.app_invoke_entities import AdvancedChatAppGenerateEntity, InvokeFrom
+from core.app.entities.app_invoke_entities import (
+ AdvancedChatAppGenerateEntity,
+ InvokeFrom,
+)
from core.app.entities.task_entities import ChatbotAppBlockingResponse, ChatbotAppStreamResponse
from core.file.message_file_parser import MessageFileParser
from core.model_runtime.errors.invoke import InvokeAuthorizationError, InvokeError
from core.ops.ops_trace_manager import TraceQueueManager
+from core.workflow.entities.variable_pool import VariablePool
+from core.workflow.enums import SystemVariable
from extensions.ext_database import db
from models.account import Account
from models.model import App, Conversation, EndUser, Message
-from models.workflow import Workflow
+from models.workflow import ConversationVariable, Workflow
logger = logging.getLogger(__name__)
@@ -89,7 +96,8 @@ def generate(
)
# get tracing instance
- trace_manager = TraceQueueManager(app_id=app_model.id)
+ user_id = user.id if isinstance(user, Account) else user.session_id
+ trace_manager = TraceQueueManager(app_model.id, user_id)
if invoke_from == InvokeFrom.DEBUGGER:
# always enable retriever resource in debugger mode
@@ -112,7 +120,6 @@ def generate(
contexts.tenant_id.set(application_generate_entity.app_config.tenant_id)
return self._generate(
- app_model=app_model,
workflow=workflow,
user=user,
invoke_from=invoke_from,
@@ -120,7 +127,7 @@ def generate(
conversation=conversation,
stream=stream
)
-
+
def single_iteration_generate(self, app_model: App,
workflow: Workflow,
node_id: str,
@@ -140,10 +147,10 @@ def single_iteration_generate(self, app_model: App,
"""
if not node_id:
raise ValueError('node_id is required')
-
+
if args.get('inputs') is None:
raise ValueError('inputs is required')
-
+
extras = {
"auto_generate_conversation_name": False
}
@@ -179,7 +186,6 @@ def single_iteration_generate(self, app_model: App,
contexts.tenant_id.set(application_generate_entity.app_config.tenant_id)
return self._generate(
- app_model=app_model,
workflow=workflow,
user=user,
invoke_from=InvokeFrom.DEBUGGER,
@@ -188,12 +194,12 @@ def single_iteration_generate(self, app_model: App,
stream=stream
)
- def _generate(self, app_model: App,
+ def _generate(self, *,
workflow: Workflow,
user: Union[Account, EndUser],
invoke_from: InvokeFrom,
application_generate_entity: AdvancedChatAppGenerateEntity,
- conversation: Conversation = None,
+ conversation: Conversation | None = None,
stream: bool = True) \
-> Union[dict, Generator[dict, None, None]]:
is_first_conversation = False
@@ -210,7 +216,7 @@ def _generate(self, app_model: App,
# update conversation features
conversation.override_model_configs = workflow.features
db.session.commit()
- db.session.refresh(conversation)
+ # db.session.refresh(conversation)
# init queue manager
queue_manager = MessageBasedAppQueueManager(
@@ -222,15 +228,69 @@ def _generate(self, app_model: App,
message_id=message.id
)
+ # Init conversation variables
+ stmt = select(ConversationVariable).where(
+ ConversationVariable.app_id == conversation.app_id, ConversationVariable.conversation_id == conversation.id
+ )
+ with Session(db.engine) as session:
+ conversation_variables = session.scalars(stmt).all()
+ if not conversation_variables:
+ # Create conversation variables if they don't exist.
+ conversation_variables = [
+ ConversationVariable.from_variable(
+ app_id=conversation.app_id, conversation_id=conversation.id, variable=variable
+ )
+ for variable in workflow.conversation_variables
+ ]
+ session.add_all(conversation_variables)
+ # Convert database entities to variables.
+ conversation_variables = [item.to_variable() for item in conversation_variables]
+
+ session.commit()
+
+ # Increment dialogue count.
+ conversation.dialogue_count += 1
+
+ conversation_id = conversation.id
+ conversation_dialogue_count = conversation.dialogue_count
+ db.session.commit()
+ db.session.refresh(conversation)
+
+ inputs = application_generate_entity.inputs
+ query = application_generate_entity.query
+ files = application_generate_entity.files
+
+ user_id = None
+ if application_generate_entity.invoke_from in [InvokeFrom.WEB_APP, InvokeFrom.SERVICE_API]:
+ end_user = db.session.query(EndUser).filter(EndUser.id == application_generate_entity.user_id).first()
+ if end_user:
+ user_id = end_user.session_id
+ else:
+ user_id = application_generate_entity.user_id
+
+ # Create a variable pool.
+ system_inputs = {
+ SystemVariable.QUERY: query,
+ SystemVariable.FILES: files,
+ SystemVariable.CONVERSATION_ID: conversation_id,
+ SystemVariable.USER_ID: user_id,
+ SystemVariable.DIALOGUE_COUNT: conversation_dialogue_count,
+ }
+ variable_pool = VariablePool(
+ system_variables=system_inputs,
+ user_inputs=inputs,
+ environment_variables=workflow.environment_variables,
+ conversation_variables=conversation_variables,
+ )
+ contexts.workflow_variable_pool.set(variable_pool)
+
# new thread
worker_thread = threading.Thread(target=self._generate_worker, kwargs={
'flask_app': current_app._get_current_object(),
'application_generate_entity': application_generate_entity,
'queue_manager': queue_manager,
- 'conversation_id': conversation.id,
'message_id': message.id,
- 'user': user,
- 'context': contextvars.copy_context()
+ 'context': contextvars.copy_context(),
})
worker_thread.start()
@@ -243,7 +303,7 @@ def _generate(self, app_model: App,
conversation=conversation,
message=message,
user=user,
- stream=stream
+ stream=stream,
)
return AdvancedChatAppGenerateResponseConverter.convert(
@@ -254,9 +314,7 @@ def _generate(self, app_model: App,
def _generate_worker(self, flask_app: Flask,
application_generate_entity: AdvancedChatAppGenerateEntity,
queue_manager: AppQueueManager,
- conversation_id: str,
message_id: str,
- user: Account,
context: contextvars.Context) -> None:
"""
Generate worker in a new thread.
@@ -283,8 +341,7 @@ def _generate_worker(self, flask_app: Flask,
user_id=application_generate_entity.user_id
)
else:
- # get conversation and message
- conversation = self._get_conversation(conversation_id)
+ # get message
message = self._get_message(message_id)
# chatbot app
@@ -292,7 +349,6 @@ def _generate_worker(self, flask_app: Flask,
runner.run(
application_generate_entity=application_generate_entity,
queue_manager=queue_manager,
- conversation=conversation,
message=message
)
except GenerateTaskStoppedException:
@@ -315,14 +371,17 @@ def _generate_worker(self, flask_app: Flask,
finally:
db.session.close()
- def _handle_advanced_chat_response(self, application_generate_entity: AdvancedChatAppGenerateEntity,
- workflow: Workflow,
- queue_manager: AppQueueManager,
- conversation: Conversation,
- message: Message,
- user: Union[Account, EndUser],
- stream: bool = False) \
- -> Union[ChatbotAppBlockingResponse, Generator[ChatbotAppStreamResponse, None, None]]:
+ def _handle_advanced_chat_response(
+ self,
+ *,
+ application_generate_entity: AdvancedChatAppGenerateEntity,
+ workflow: Workflow,
+ queue_manager: AppQueueManager,
+ conversation: Conversation,
+ message: Message,
+ user: Union[Account, EndUser],
+ stream: bool = False,
+ ) -> Union[ChatbotAppBlockingResponse, Generator[ChatbotAppStreamResponse, None, None]]:
"""
Handle response.
:param application_generate_entity: application generate entity
@@ -342,7 +401,7 @@ def _handle_advanced_chat_response(self, application_generate_entity: AdvancedCh
conversation=conversation,
message=message,
user=user,
- stream=stream
+ stream=stream,
)
try:
diff --git a/api/core/app/apps/advanced_chat/app_runner.py b/api/core/app/apps/advanced_chat/app_runner.py
index 18db0ab22d4ded..5dc03979cf3b4b 100644
--- a/api/core/app/apps/advanced_chat/app_runner.py
+++ b/api/core/app/apps/advanced_chat/app_runner.py
@@ -16,12 +16,10 @@
from core.app.entities.queue_entities import QueueAnnotationReplyEvent, QueueStopEvent, QueueTextChunkEvent
from core.moderation.base import ModerationException
from core.workflow.callbacks.base_workflow_callback import WorkflowCallback
-from core.workflow.entities.node_entities import SystemVariable
from core.workflow.nodes.base_node import UserFrom
from core.workflow.workflow_engine_manager import WorkflowEngineManager
from extensions.ext_database import db
-from models.model import App, Conversation, EndUser, Message
-from models.workflow import Workflow
+from models import App, Message, Workflow
logger = logging.getLogger(__name__)
@@ -31,10 +29,12 @@ class AdvancedChatAppRunner(AppRunner):
AdvancedChat Application Runner
"""
- def run(self, application_generate_entity: AdvancedChatAppGenerateEntity,
- queue_manager: AppQueueManager,
- conversation: Conversation,
- message: Message) -> None:
+ def run(
+ self,
+ application_generate_entity: AdvancedChatAppGenerateEntity,
+ queue_manager: AppQueueManager,
+ message: Message,
+ ) -> None:
"""
Run application
:param application_generate_entity: application generate entity
@@ -48,53 +48,43 @@ def run(self, application_generate_entity: AdvancedChatAppGenerateEntity,
app_record = db.session.query(App).filter(App.id == app_config.app_id).first()
if not app_record:
- raise ValueError("App not found")
+ raise ValueError('App not found')
workflow = self.get_workflow(app_model=app_record, workflow_id=app_config.workflow_id)
if not workflow:
- raise ValueError("Workflow not initialized")
+ raise ValueError('Workflow not initialized')
inputs = application_generate_entity.inputs
query = application_generate_entity.query
- files = application_generate_entity.files
-
- user_id = None
- if application_generate_entity.invoke_from in [InvokeFrom.WEB_APP, InvokeFrom.SERVICE_API]:
- end_user = db.session.query(EndUser).filter(EndUser.id == application_generate_entity.user_id).first()
- if end_user:
- user_id = end_user.session_id
- else:
- user_id = application_generate_entity.user_id
# moderation
if self.handle_input_moderation(
- queue_manager=queue_manager,
- app_record=app_record,
- app_generate_entity=application_generate_entity,
- inputs=inputs,
- query=query,
- message_id=message.id
+ queue_manager=queue_manager,
+ app_record=app_record,
+ app_generate_entity=application_generate_entity,
+ inputs=inputs,
+ query=query,
+ message_id=message.id,
):
return
# annotation reply
if self.handle_annotation_reply(
- app_record=app_record,
- message=message,
- query=query,
- queue_manager=queue_manager,
- app_generate_entity=application_generate_entity
+ app_record=app_record,
+ message=message,
+ query=query,
+ queue_manager=queue_manager,
+ app_generate_entity=application_generate_entity,
):
return
db.session.close()
- workflow_callbacks: list[WorkflowCallback] = [WorkflowEventTriggerCallback(
- queue_manager=queue_manager,
- workflow=workflow
- )]
+ workflow_callbacks: list[WorkflowCallback] = [
+ WorkflowEventTriggerCallback(queue_manager=queue_manager, workflow=workflow)
+ ]
- if bool(os.environ.get("DEBUG", 'False').lower() == 'true'):
+ if bool(os.environ.get('DEBUG', 'False').lower() == 'true'):
workflow_callbacks.append(WorkflowLoggingCallback())
# RUN WORKFLOW
@@ -106,43 +96,29 @@ def run(self, application_generate_entity: AdvancedChatAppGenerateEntity,
if application_generate_entity.invoke_from in [InvokeFrom.EXPLORE, InvokeFrom.DEBUGGER]
else UserFrom.END_USER,
invoke_from=application_generate_entity.invoke_from,
- user_inputs=inputs,
- system_inputs={
- SystemVariable.QUERY: query,
- SystemVariable.FILES: files,
- SystemVariable.CONVERSATION_ID: conversation.id,
- SystemVariable.USER_ID: user_id
- },
callbacks=workflow_callbacks,
- call_depth=application_generate_entity.call_depth
+ call_depth=application_generate_entity.call_depth,
)
- def single_iteration_run(self, app_id: str, workflow_id: str,
- queue_manager: AppQueueManager,
- inputs: dict, node_id: str, user_id: str) -> None:
+ def single_iteration_run(
+ self, app_id: str, workflow_id: str, queue_manager: AppQueueManager, inputs: dict, node_id: str, user_id: str
+ ) -> None:
"""
Single iteration run
"""
- app_record: App = db.session.query(App).filter(App.id == app_id).first()
+ app_record = db.session.query(App).filter(App.id == app_id).first()
if not app_record:
- raise ValueError("App not found")
-
+ raise ValueError('App not found')
+
workflow = self.get_workflow(app_model=app_record, workflow_id=workflow_id)
if not workflow:
- raise ValueError("Workflow not initialized")
-
- workflow_callbacks = [WorkflowEventTriggerCallback(
- queue_manager=queue_manager,
- workflow=workflow
- )]
+ raise ValueError('Workflow not initialized')
+
+ workflow_callbacks = [WorkflowEventTriggerCallback(queue_manager=queue_manager, workflow=workflow)]
workflow_engine_manager = WorkflowEngineManager()
workflow_engine_manager.single_step_run_iteration_workflow_node(
- workflow=workflow,
- node_id=node_id,
- user_id=user_id,
- user_inputs=inputs,
- callbacks=workflow_callbacks
+ workflow=workflow, node_id=node_id, user_id=user_id, user_inputs=inputs, callbacks=workflow_callbacks
)
def get_workflow(self, app_model: App, workflow_id: str) -> Optional[Workflow]:
@@ -150,22 +126,25 @@ def get_workflow(self, app_model: App, workflow_id: str) -> Optional[Workflow]:
Get workflow
"""
# fetch workflow by workflow_id
- workflow = db.session.query(Workflow).filter(
- Workflow.tenant_id == app_model.tenant_id,
- Workflow.app_id == app_model.id,
- Workflow.id == workflow_id
- ).first()
+ workflow = (
+ db.session.query(Workflow)
+ .filter(
+ Workflow.tenant_id == app_model.tenant_id, Workflow.app_id == app_model.id, Workflow.id == workflow_id
+ )
+ .first()
+ )
# return workflow
return workflow
def handle_input_moderation(
- self, queue_manager: AppQueueManager,
- app_record: App,
- app_generate_entity: AdvancedChatAppGenerateEntity,
- inputs: Mapping[str, Any],
- query: str,
- message_id: str
+ self,
+ queue_manager: AppQueueManager,
+ app_record: App,
+ app_generate_entity: AdvancedChatAppGenerateEntity,
+ inputs: Mapping[str, Any],
+ query: str,
+ message_id: str,
) -> bool:
"""
Handle input moderation
@@ -192,17 +171,20 @@ def handle_input_moderation(
queue_manager=queue_manager,
text=str(e),
stream=app_generate_entity.stream,
- stopped_by=QueueStopEvent.StopBy.INPUT_MODERATION
+ stopped_by=QueueStopEvent.StopBy.INPUT_MODERATION,
)
return True
return False
- def handle_annotation_reply(self, app_record: App,
- message: Message,
- query: str,
- queue_manager: AppQueueManager,
- app_generate_entity: AdvancedChatAppGenerateEntity) -> bool:
+ def handle_annotation_reply(
+ self,
+ app_record: App,
+ message: Message,
+ query: str,
+ queue_manager: AppQueueManager,
+ app_generate_entity: AdvancedChatAppGenerateEntity,
+ ) -> bool:
"""
Handle annotation reply
:param app_record: app record
@@ -217,29 +199,27 @@ def handle_annotation_reply(self, app_record: App,
message=message,
query=query,
user_id=app_generate_entity.user_id,
- invoke_from=app_generate_entity.invoke_from
+ invoke_from=app_generate_entity.invoke_from,
)
if annotation_reply:
queue_manager.publish(
- QueueAnnotationReplyEvent(message_annotation_id=annotation_reply.id),
- PublishFrom.APPLICATION_MANAGER
+ QueueAnnotationReplyEvent(message_annotation_id=annotation_reply.id), PublishFrom.APPLICATION_MANAGER
)
self._stream_output(
queue_manager=queue_manager,
text=annotation_reply.content,
stream=app_generate_entity.stream,
- stopped_by=QueueStopEvent.StopBy.ANNOTATION_REPLY
+ stopped_by=QueueStopEvent.StopBy.ANNOTATION_REPLY,
)
return True
return False
- def _stream_output(self, queue_manager: AppQueueManager,
- text: str,
- stream: bool,
- stopped_by: QueueStopEvent.StopBy) -> None:
+ def _stream_output(
+ self, queue_manager: AppQueueManager, text: str, stream: bool, stopped_by: QueueStopEvent.StopBy
+ ) -> None:
"""
Direct output
:param queue_manager: application queue manager
@@ -250,21 +230,10 @@ def _stream_output(self, queue_manager: AppQueueManager,
if stream:
index = 0
for token in text:
- queue_manager.publish(
- QueueTextChunkEvent(
- text=token
- ), PublishFrom.APPLICATION_MANAGER
- )
+ queue_manager.publish(QueueTextChunkEvent(text=token), PublishFrom.APPLICATION_MANAGER)
index += 1
time.sleep(0.01)
else:
- queue_manager.publish(
- QueueTextChunkEvent(
- text=text
- ), PublishFrom.APPLICATION_MANAGER
- )
+ queue_manager.publish(QueueTextChunkEvent(text=text), PublishFrom.APPLICATION_MANAGER)
- queue_manager.publish(
- QueueStopEvent(stopped_by=stopped_by),
- PublishFrom.APPLICATION_MANAGER
- )
+ queue_manager.publish(QueueStopEvent(stopped_by=stopped_by), PublishFrom.APPLICATION_MANAGER)
diff --git a/api/core/app/apps/advanced_chat/generate_task_pipeline.py b/api/core/app/apps/advanced_chat/generate_task_pipeline.py
index a042d30e001aec..f8efcb59606d08 100644
--- a/api/core/app/apps/advanced_chat/generate_task_pipeline.py
+++ b/api/core/app/apps/advanced_chat/generate_task_pipeline.py
@@ -4,6 +4,7 @@
from collections.abc import Generator
from typing import Any, Optional, Union, cast
+import contexts
from constants.tts_auto_play_timeout import TTS_AUTO_PLAY_TIMEOUT, TTS_AUTO_PLAY_YIELD_CPU_TIME
from core.app.apps.advanced_chat.app_generator_tts_publisher import AppGeneratorTTSPublisher, AudioTrunk
from core.app.apps.base_app_queue_manager import AppQueueManager, PublishFrom
@@ -47,7 +48,8 @@
from core.model_runtime.entities.llm_entities import LLMUsage
from core.model_runtime.utils.encoders import jsonable_encoder
from core.ops.ops_trace_manager import TraceQueueManager
-from core.workflow.entities.node_entities import NodeType, SystemVariable
+from core.workflow.entities.node_entities import NodeType
+from core.workflow.enums import SystemVariable
from core.workflow.nodes.answer.answer_node import AnswerNode
from core.workflow.nodes.answer.entities import TextGenerateRouteChunk, VarGenerateRouteChunk
from events.message_event import message_was_created
@@ -71,6 +73,7 @@ class AdvancedChatAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCyc
_application_generate_entity: AdvancedChatAppGenerateEntity
_workflow: Workflow
_user: Union[Account, EndUser]
+ # Deprecated
_workflow_system_variables: dict[SystemVariable, Any]
_iteration_nested_relations: dict[str, list[str]]
@@ -81,7 +84,7 @@ def __init__(
conversation: Conversation,
message: Message,
user: Union[Account, EndUser],
- stream: bool
+ stream: bool,
) -> None:
"""
Initialize AdvancedChatAppGenerateTaskPipeline.
@@ -103,11 +106,12 @@ def __init__(
self._workflow = workflow
self._conversation = conversation
self._message = message
+ # Deprecated
self._workflow_system_variables = {
SystemVariable.QUERY: message.query,
SystemVariable.FILES: application_generate_entity.files,
SystemVariable.CONVERSATION_ID: conversation.id,
- SystemVariable.USER_ID: user_id
+ SystemVariable.USER_ID: user_id,
}
self._task_state = AdvancedChatTaskState(
@@ -244,7 +248,11 @@ def _process_stream_response(
:return:
"""
for message in self._queue_manager.listen():
- if hasattr(message.event, 'metadata') and message.event.metadata.get('is_answer_previous_node', False) and publisher:
+ if (message.event
+ and hasattr(message.event, 'metadata')
+ and message.event.metadata
+ and message.event.metadata.get('is_answer_previous_node', False)
+ and publisher):
publisher.publish(message=message)
elif (hasattr(message.event, 'execution_metadata')
and message.event.execution_metadata
@@ -609,7 +617,9 @@ def _generate_stream_outputs_when_node_finished(self) -> Optional[Generator]:
if route_chunk_node_id == 'sys':
# system variable
- value = self._workflow_system_variables.get(SystemVariable.value_of(value_selector[1]))
+ value = contexts.workflow_variable_pool.get().get(value_selector)
+ if value:
+ value = value.text
elif route_chunk_node_id in self._iteration_nested_relations:
# it's a iteration variable
if not self._iteration_state or route_chunk_node_id not in self._iteration_state.current_iterations:
diff --git a/api/core/app/apps/base_app_runner.py b/api/core/app/apps/base_app_runner.py
index 58c7d04b8348f8..6fb387c15ac566 100644
--- a/api/core/app/apps/base_app_runner.py
+++ b/api/core/app/apps/base_app_runner.py
@@ -1,6 +1,6 @@
import time
from collections.abc import Generator
-from typing import Optional, Union
+from typing import TYPE_CHECKING, Optional, Union
from core.app.app_config.entities import ExternalDataVariableEntity, PromptTemplateEntity
from core.app.apps.base_app_queue_manager import AppQueueManager, PublishFrom
@@ -14,7 +14,6 @@
from core.app.features.annotation_reply.annotation_reply import AnnotationReplyFeature
from core.app.features.hosting_moderation.hosting_moderation import HostingModerationFeature
from core.external_data_tool.external_data_fetch import ExternalDataFetch
-from core.file.file_obj import FileVar
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_manager import ModelInstance
from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage
@@ -27,13 +26,16 @@
from core.prompt.simple_prompt_transform import ModelMode, SimplePromptTransform
from models.model import App, AppMode, Message, MessageAnnotation
+if TYPE_CHECKING:
+ from core.file.file_obj import FileVar
+
class AppRunner:
def get_pre_calculate_rest_tokens(self, app_record: App,
model_config: ModelConfigWithCredentialsEntity,
prompt_template_entity: PromptTemplateEntity,
inputs: dict[str, str],
- files: list[FileVar],
+ files: list["FileVar"],
query: Optional[str] = None) -> int:
"""
Get pre calculate rest tokens
@@ -126,7 +128,7 @@ def organize_prompt_messages(self, app_record: App,
model_config: ModelConfigWithCredentialsEntity,
prompt_template_entity: PromptTemplateEntity,
inputs: dict[str, str],
- files: list[FileVar],
+ files: list["FileVar"],
query: Optional[str] = None,
context: Optional[str] = None,
memory: Optional[TokenBufferMemory] = None) \
@@ -366,7 +368,7 @@ def moderation_for_inputs(
message_id=message_id,
trace_manager=app_generate_entity.trace_manager
)
-
+
def check_hosting_moderation(self, application_generate_entity: EasyUIBasedAppGenerateEntity,
queue_manager: AppQueueManager,
prompt_messages: list[PromptMessage]) -> bool:
@@ -418,7 +420,7 @@ def fill_in_inputs_from_external_data_tools(self, tenant_id: str,
inputs=inputs,
query=query
)
-
+
def query_app_annotations_to_reply(self, app_record: App,
message: Message,
query: str,
diff --git a/api/core/app/apps/message_based_app_generator.py b/api/core/app/apps/message_based_app_generator.py
index c5cd6864020b33..12f69f1528e241 100644
--- a/api/core/app/apps/message_based_app_generator.py
+++ b/api/core/app/apps/message_based_app_generator.py
@@ -258,7 +258,7 @@ def _get_conversation_introduction(self, application_generate_entity: AppGenerat
return introduction
- def _get_conversation(self, conversation_id: str) -> Conversation:
+ def _get_conversation(self, conversation_id: str):
"""
Get conversation by conversation id
:param conversation_id: conversation id
@@ -270,6 +270,9 @@ def _get_conversation(self, conversation_id: str) -> Conversation:
.first()
)
+ if not conversation:
+ raise ConversationNotExistsError()
+
return conversation
def _get_message(self, message_id: str) -> Message:
diff --git a/api/core/app/apps/workflow/app_runner.py b/api/core/app/apps/workflow/app_runner.py
index 24f4a83217a239..994919391e7ed5 100644
--- a/api/core/app/apps/workflow/app_runner.py
+++ b/api/core/app/apps/workflow/app_runner.py
@@ -11,7 +11,8 @@
WorkflowAppGenerateEntity,
)
from core.workflow.callbacks.base_workflow_callback import WorkflowCallback
-from core.workflow.entities.node_entities import SystemVariable
+from core.workflow.entities.variable_pool import VariablePool
+from core.workflow.enums import SystemVariable
from core.workflow.nodes.base_node import UserFrom
from core.workflow.workflow_engine_manager import WorkflowEngineManager
from extensions.ext_database import db
@@ -26,8 +27,7 @@ class WorkflowAppRunner:
Workflow Application Runner
"""
- def run(self, application_generate_entity: WorkflowAppGenerateEntity,
- queue_manager: AppQueueManager) -> None:
+ def run(self, application_generate_entity: WorkflowAppGenerateEntity, queue_manager: AppQueueManager) -> None:
"""
Run application
:param application_generate_entity: application generate entity
@@ -47,25 +47,36 @@ def run(self, application_generate_entity: WorkflowAppGenerateEntity,
app_record = db.session.query(App).filter(App.id == app_config.app_id).first()
if not app_record:
- raise ValueError("App not found")
+ raise ValueError('App not found')
workflow = self.get_workflow(app_model=app_record, workflow_id=app_config.workflow_id)
if not workflow:
- raise ValueError("Workflow not initialized")
+ raise ValueError('Workflow not initialized')
inputs = application_generate_entity.inputs
files = application_generate_entity.files
db.session.close()
- workflow_callbacks: list[WorkflowCallback] = [WorkflowEventTriggerCallback(
- queue_manager=queue_manager,
- workflow=workflow
- )]
+ workflow_callbacks: list[WorkflowCallback] = [
+ WorkflowEventTriggerCallback(queue_manager=queue_manager, workflow=workflow)
+ ]
- if bool(os.environ.get("DEBUG", 'False').lower() == 'true'):
+ if bool(os.environ.get('DEBUG', 'False').lower() == 'true'):
workflow_callbacks.append(WorkflowLoggingCallback())
+ # Create a variable pool.
+ system_inputs = {
+ SystemVariable.FILES: files,
+ SystemVariable.USER_ID: user_id,
+ }
+ variable_pool = VariablePool(
+ system_variables=system_inputs,
+ user_inputs=inputs,
+ environment_variables=workflow.environment_variables,
+ conversation_variables=[],
+ )
+
# RUN WORKFLOW
workflow_engine_manager = WorkflowEngineManager()
workflow_engine_manager.run_workflow(
@@ -75,44 +86,33 @@ def run(self, application_generate_entity: WorkflowAppGenerateEntity,
if application_generate_entity.invoke_from in [InvokeFrom.EXPLORE, InvokeFrom.DEBUGGER]
else UserFrom.END_USER,
invoke_from=application_generate_entity.invoke_from,
- user_inputs=inputs,
- system_inputs={
- SystemVariable.FILES: files,
- SystemVariable.USER_ID: user_id
- },
callbacks=workflow_callbacks,
- call_depth=application_generate_entity.call_depth
+ call_depth=application_generate_entity.call_depth,
+ variable_pool=variable_pool,
)
- def single_iteration_run(self, app_id: str, workflow_id: str,
- queue_manager: AppQueueManager,
- inputs: dict, node_id: str, user_id: str) -> None:
+ def single_iteration_run(
+ self, app_id: str, workflow_id: str, queue_manager: AppQueueManager, inputs: dict, node_id: str, user_id: str
+ ) -> None:
"""
Single iteration run
"""
- app_record: App = db.session.query(App).filter(App.id == app_id).first()
+ app_record = db.session.query(App).filter(App.id == app_id).first()
if not app_record:
- raise ValueError("App not found")
-
+ raise ValueError('App not found')
+
if not app_record.workflow_id:
- raise ValueError("Workflow not initialized")
+ raise ValueError('Workflow not initialized')
workflow = self.get_workflow(app_model=app_record, workflow_id=workflow_id)
if not workflow:
- raise ValueError("Workflow not initialized")
-
- workflow_callbacks = [WorkflowEventTriggerCallback(
- queue_manager=queue_manager,
- workflow=workflow
- )]
+ raise ValueError('Workflow not initialized')
+
+ workflow_callbacks = [WorkflowEventTriggerCallback(queue_manager=queue_manager, workflow=workflow)]
workflow_engine_manager = WorkflowEngineManager()
workflow_engine_manager.single_step_run_iteration_workflow_node(
- workflow=workflow,
- node_id=node_id,
- user_id=user_id,
- user_inputs=inputs,
- callbacks=workflow_callbacks
+ workflow=workflow, node_id=node_id, user_id=user_id, user_inputs=inputs, callbacks=workflow_callbacks
)
def get_workflow(self, app_model: App, workflow_id: str) -> Optional[Workflow]:
@@ -120,11 +120,13 @@ def get_workflow(self, app_model: App, workflow_id: str) -> Optional[Workflow]:
Get workflow
"""
# fetch workflow by workflow_id
- workflow = db.session.query(Workflow).filter(
- Workflow.tenant_id == app_model.tenant_id,
- Workflow.app_id == app_model.id,
- Workflow.id == workflow_id
- ).first()
+ workflow = (
+ db.session.query(Workflow)
+ .filter(
+ Workflow.tenant_id == app_model.tenant_id, Workflow.app_id == app_model.id, Workflow.id == workflow_id
+ )
+ .first()
+ )
# return workflow
return workflow
diff --git a/api/core/app/apps/workflow/generate_task_pipeline.py b/api/core/app/apps/workflow/generate_task_pipeline.py
index 2b4362150fc7e7..5022eb0438d13b 100644
--- a/api/core/app/apps/workflow/generate_task_pipeline.py
+++ b/api/core/app/apps/workflow/generate_task_pipeline.py
@@ -42,7 +42,8 @@
from core.app.task_pipeline.based_generate_task_pipeline import BasedGenerateTaskPipeline
from core.app.task_pipeline.workflow_cycle_manage import WorkflowCycleManage
from core.ops.ops_trace_manager import TraceQueueManager
-from core.workflow.entities.node_entities import NodeType, SystemVariable
+from core.workflow.entities.node_entities import NodeType
+from core.workflow.enums import SystemVariable
from core.workflow.nodes.end.end_node import EndNode
from extensions.ext_database import db
from models.account import Account
@@ -519,7 +520,7 @@ def _get_iteration_nested_relations(self, graph: dict) -> dict[str, list[str]]:
"""
nodes = graph.get('nodes')
- iteration_ids = [node.get('id') for node in nodes
+ iteration_ids = [node.get('id') for node in nodes
if node.get('data', {}).get('type') in [
NodeType.ITERATION.value,
NodeType.LOOP.value,
@@ -530,4 +531,3 @@ def _get_iteration_nested_relations(self, graph: dict) -> dict[str, list[str]]:
node.get('id') for node in nodes if node.get('data', {}).get('iteration_id') == iteration_id
] for iteration_id in iteration_ids
}
-
\ No newline at end of file
diff --git a/api/core/app/entities/app_invoke_entities.py b/api/core/app/entities/app_invoke_entities.py
index 9a861c29e2634c..6a1ab230416d0c 100644
--- a/api/core/app/entities/app_invoke_entities.py
+++ b/api/core/app/entities/app_invoke_entities.py
@@ -166,4 +166,4 @@ class SingleIterationRunEntity(BaseModel):
node_id: str
inputs: dict
- single_iteration_run: Optional[SingleIterationRunEntity] = None
\ No newline at end of file
+ single_iteration_run: Optional[SingleIterationRunEntity] = None
diff --git a/api/core/app/segments/__init__.py b/api/core/app/segments/__init__.py
index d5cd0a589cc38a..7de06dfb9639fd 100644
--- a/api/core/app/segments/__init__.py
+++ b/api/core/app/segments/__init__.py
@@ -1,7 +1,7 @@
from .segment_group import SegmentGroup
from .segments import (
ArrayAnySegment,
- FileSegment,
+ ArraySegment,
FloatSegment,
IntegerSegment,
NoneSegment,
@@ -12,11 +12,9 @@
from .types import SegmentType
from .variables import (
ArrayAnyVariable,
- ArrayFileVariable,
ArrayNumberVariable,
ArrayObjectVariable,
ArrayStringVariable,
- FileVariable,
FloatVariable,
IntegerVariable,
NoneVariable,
@@ -31,7 +29,6 @@
'FloatVariable',
'ObjectVariable',
'SecretVariable',
- 'FileVariable',
'StringVariable',
'ArrayAnyVariable',
'Variable',
@@ -44,10 +41,9 @@
'FloatSegment',
'ObjectSegment',
'ArrayAnySegment',
- 'FileSegment',
'StringSegment',
'ArrayStringVariable',
'ArrayNumberVariable',
'ArrayObjectVariable',
- 'ArrayFileVariable',
+ 'ArraySegment',
]
diff --git a/api/core/app/segments/exc.py b/api/core/app/segments/exc.py
new file mode 100644
index 00000000000000..d15d6d500ffa4a
--- /dev/null
+++ b/api/core/app/segments/exc.py
@@ -0,0 +1,2 @@
+class VariableError(Exception):
+ pass
diff --git a/api/core/app/segments/factory.py b/api/core/app/segments/factory.py
index f62e44bf07d2f7..e6e9ce97747ce1 100644
--- a/api/core/app/segments/factory.py
+++ b/api/core/app/segments/factory.py
@@ -1,11 +1,11 @@
from collections.abc import Mapping
from typing import Any
-from core.file.file_obj import FileVar
+from configs import dify_config
+from .exc import VariableError
from .segments import (
ArrayAnySegment,
- FileSegment,
FloatSegment,
IntegerSegment,
NoneSegment,
@@ -15,11 +15,9 @@
)
from .types import SegmentType
from .variables import (
- ArrayFileVariable,
ArrayNumberVariable,
ArrayObjectVariable,
ArrayStringVariable,
- FileVariable,
FloatVariable,
IntegerVariable,
ObjectVariable,
@@ -29,39 +27,37 @@
)
-def build_variable_from_mapping(m: Mapping[str, Any], /) -> Variable:
- if (value_type := m.get('value_type')) is None:
- raise ValueError('missing value type')
- if not m.get('name'):
- raise ValueError('missing name')
- if (value := m.get('value')) is None:
- raise ValueError('missing value')
+def build_variable_from_mapping(mapping: Mapping[str, Any], /) -> Variable:
+ if (value_type := mapping.get('value_type')) is None:
+ raise VariableError('missing value type')
+ if not mapping.get('name'):
+ raise VariableError('missing name')
+ if (value := mapping.get('value')) is None:
+ raise VariableError('missing value')
match value_type:
case SegmentType.STRING:
- return StringVariable.model_validate(m)
+ result = StringVariable.model_validate(mapping)
case SegmentType.SECRET:
- return SecretVariable.model_validate(m)
+ result = SecretVariable.model_validate(mapping)
case SegmentType.NUMBER if isinstance(value, int):
- return IntegerVariable.model_validate(m)
+ result = IntegerVariable.model_validate(mapping)
case SegmentType.NUMBER if isinstance(value, float):
- return FloatVariable.model_validate(m)
+ result = FloatVariable.model_validate(mapping)
case SegmentType.NUMBER if not isinstance(value, float | int):
- raise ValueError(f'invalid number value {value}')
- case SegmentType.FILE:
- return FileVariable.model_validate(m)
+ raise VariableError(f'invalid number value {value}')
case SegmentType.OBJECT if isinstance(value, dict):
- return ObjectVariable.model_validate(
- {**m, 'value': {k: build_variable_from_mapping(v) for k, v in value.items()}}
- )
+ result = ObjectVariable.model_validate(mapping)
case SegmentType.ARRAY_STRING if isinstance(value, list):
- return ArrayStringVariable.model_validate({**m, 'value': [build_variable_from_mapping(v) for v in value]})
+ result = ArrayStringVariable.model_validate(mapping)
case SegmentType.ARRAY_NUMBER if isinstance(value, list):
- return ArrayNumberVariable.model_validate({**m, 'value': [build_variable_from_mapping(v) for v in value]})
+ result = ArrayNumberVariable.model_validate(mapping)
case SegmentType.ARRAY_OBJECT if isinstance(value, list):
- return ArrayObjectVariable.model_validate({**m, 'value': [build_variable_from_mapping(v) for v in value]})
- case SegmentType.ARRAY_FILE if isinstance(value, list):
- return ArrayFileVariable.model_validate({**m, 'value': [build_variable_from_mapping(v) for v in value]})
- raise ValueError(f'not supported value type {value_type}')
+ result = ArrayObjectVariable.model_validate(mapping)
+ case _:
+ raise VariableError(f'not supported value type {value_type}')
+ if result.size > dify_config.MAX_VARIABLE_SIZE:
+ raise VariableError(f'variable size {result.size} exceeds limit {dify_config.MAX_VARIABLE_SIZE}')
+ return result
def build_segment(value: Any, /) -> Segment:
@@ -74,13 +70,7 @@ def build_segment(value: Any, /) -> Segment:
if isinstance(value, float):
return FloatSegment(value=value)
if isinstance(value, dict):
- # TODO: Limit the depth of the object
- obj = {k: build_segment(v) for k, v in value.items()}
- return ObjectSegment(value=obj)
+ return ObjectSegment(value=value)
if isinstance(value, list):
- # TODO: Limit the depth of the array
- elements = [build_segment(v) for v in value]
- return ArrayAnySegment(value=elements)
- if isinstance(value, FileVar):
- return FileSegment(value=value)
+ return ArrayAnySegment(value=value)
raise ValueError(f'not supported value {value}')
diff --git a/api/core/app/segments/segments.py b/api/core/app/segments/segments.py
index 4227f154e6aaac..5c713cac6747f9 100644
--- a/api/core/app/segments/segments.py
+++ b/api/core/app/segments/segments.py
@@ -1,11 +1,10 @@
import json
+import sys
from collections.abc import Mapping, Sequence
from typing import Any
from pydantic import BaseModel, ConfigDict, field_validator
-from core.file.file_obj import FileVar
-
from .types import SegmentType
@@ -37,6 +36,10 @@ def log(self) -> str:
def markdown(self) -> str:
return str(self.value)
+ @property
+ def size(self) -> int:
+ return sys.getsizeof(self.value)
+
def to_object(self) -> Any:
return self.value
@@ -73,68 +76,54 @@ class IntegerSegment(Segment):
value: int
-class FileSegment(Segment):
- value_type: SegmentType = SegmentType.FILE
- # TODO: embed FileVar in this model.
- value: FileVar
- @property
- def markdown(self) -> str:
- return self.value.to_markdown()
class ObjectSegment(Segment):
value_type: SegmentType = SegmentType.OBJECT
- value: Mapping[str, Segment]
+ value: Mapping[str, Any]
@property
def text(self) -> str:
- # TODO: Process variables.
return json.dumps(self.model_dump()['value'], ensure_ascii=False)
@property
def log(self) -> str:
- # TODO: Process variables.
return json.dumps(self.model_dump()['value'], ensure_ascii=False, indent=2)
@property
def markdown(self) -> str:
- # TODO: Use markdown code block
return json.dumps(self.model_dump()['value'], ensure_ascii=False, indent=2)
- def to_object(self):
- return {k: v.to_object() for k, v in self.value.items()}
-
class ArraySegment(Segment):
@property
def markdown(self) -> str:
- return '\n'.join(['- ' + item.markdown for item in self.value])
-
- def to_object(self):
- return [v.to_object() for v in self.value]
+ items = []
+ for item in self.value:
+ if hasattr(item, 'to_markdown'):
+ items.append(item.to_markdown())
+ else:
+ items.append(str(item))
+ return '\n'.join(items)
class ArrayAnySegment(ArraySegment):
value_type: SegmentType = SegmentType.ARRAY_ANY
- value: Sequence[Segment]
+ value: Sequence[Any]
class ArrayStringSegment(ArraySegment):
value_type: SegmentType = SegmentType.ARRAY_STRING
- value: Sequence[StringSegment]
+ value: Sequence[str]
class ArrayNumberSegment(ArraySegment):
value_type: SegmentType = SegmentType.ARRAY_NUMBER
- value: Sequence[FloatSegment | IntegerSegment]
+ value: Sequence[float | int]
class ArrayObjectSegment(ArraySegment):
value_type: SegmentType = SegmentType.ARRAY_OBJECT
- value: Sequence[ObjectSegment]
-
+ value: Sequence[Mapping[str, Any]]
-class ArrayFileSegment(ArraySegment):
- value_type: SegmentType = SegmentType.ARRAY_FILE
- value: Sequence[FileSegment]
diff --git a/api/core/app/segments/types.py b/api/core/app/segments/types.py
index a371058ef52bac..cdd2b0b4b09191 100644
--- a/api/core/app/segments/types.py
+++ b/api/core/app/segments/types.py
@@ -10,8 +10,6 @@ class SegmentType(str, Enum):
ARRAY_STRING = 'array[string]'
ARRAY_NUMBER = 'array[number]'
ARRAY_OBJECT = 'array[object]'
- ARRAY_FILE = 'array[file]'
OBJECT = 'object'
- FILE = 'file'
GROUP = 'group'
diff --git a/api/core/app/segments/variables.py b/api/core/app/segments/variables.py
index ac26e165425c3a..8fef707fcf298b 100644
--- a/api/core/app/segments/variables.py
+++ b/api/core/app/segments/variables.py
@@ -4,11 +4,9 @@
from .segments import (
ArrayAnySegment,
- ArrayFileSegment,
ArrayNumberSegment,
ArrayObjectSegment,
ArrayStringSegment,
- FileSegment,
FloatSegment,
IntegerSegment,
NoneSegment,
@@ -44,10 +42,6 @@ class IntegerVariable(IntegerSegment, Variable):
pass
-class FileVariable(FileSegment, Variable):
- pass
-
-
class ObjectVariable(ObjectSegment, Variable):
pass
@@ -68,9 +62,6 @@ class ArrayObjectVariable(ArrayObjectSegment, Variable):
pass
-class ArrayFileVariable(ArrayFileSegment, Variable):
- pass
-
class SecretVariable(StringVariable):
value_type: SegmentType = SegmentType.SECRET
diff --git a/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py b/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py
index c9644c7d4cf1c0..8d91a507a9e8ee 100644
--- a/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py
+++ b/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py
@@ -48,7 +48,8 @@
)
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
from core.model_runtime.utils.encoders import jsonable_encoder
-from core.ops.ops_trace_manager import TraceQueueManager, TraceTask, TraceTaskName
+from core.ops.entities.trace_entity import TraceTaskName
+from core.ops.ops_trace_manager import TraceQueueManager, TraceTask
from core.prompt.utils.prompt_message_util import PromptMessageUtil
from core.prompt.utils.prompt_template_parser import PromptTemplateParser
from events.message_event import message_was_created
diff --git a/api/core/app/task_pipeline/workflow_cycle_manage.py b/api/core/app/task_pipeline/workflow_cycle_manage.py
index b4859edbd965d6..4935c43ac437e4 100644
--- a/api/core/app/task_pipeline/workflow_cycle_manage.py
+++ b/api/core/app/task_pipeline/workflow_cycle_manage.py
@@ -22,7 +22,8 @@
from core.app.task_pipeline.workflow_iteration_cycle_manage import WorkflowIterationCycleManage
from core.file.file_obj import FileVar
from core.model_runtime.utils.encoders import jsonable_encoder
-from core.ops.ops_trace_manager import TraceQueueManager, TraceTask, TraceTaskName
+from core.ops.entities.trace_entity import TraceTaskName
+from core.ops.ops_trace_manager import TraceQueueManager, TraceTask
from core.tools.tool_manager import ToolManager
from core.workflow.entities.node_entities import NodeRunMetadataKey, NodeType
from core.workflow.nodes.tool.entities import ToolNodeData
@@ -40,6 +41,7 @@
WorkflowRunStatus,
WorkflowRunTriggeredFrom,
)
+from services.workflow_service import WorkflowService
class WorkflowCycleManage(WorkflowIterationCycleManage):
@@ -97,7 +99,6 @@ def _init_workflow_run(self, workflow: Workflow,
def _workflow_run_success(
self, workflow_run: WorkflowRun,
- start_at: float,
total_tokens: int,
total_steps: int,
outputs: Optional[str] = None,
@@ -107,7 +108,6 @@ def _workflow_run_success(
"""
Workflow run success
:param workflow_run: workflow run
- :param start_at: start time
:param total_tokens: total tokens
:param total_steps: total steps
:param outputs: outputs
@@ -116,7 +116,7 @@ def _workflow_run_success(
"""
workflow_run.status = WorkflowRunStatus.SUCCEEDED.value
workflow_run.outputs = outputs
- workflow_run.elapsed_time = time.perf_counter() - start_at
+ workflow_run.elapsed_time = WorkflowService.get_elapsed_time(workflow_run_id=workflow_run.id)
workflow_run.total_tokens = total_tokens
workflow_run.total_steps = total_steps
workflow_run.finished_at = datetime.now(timezone.utc).replace(tzinfo=None)
@@ -139,7 +139,6 @@ def _workflow_run_success(
def _workflow_run_failed(
self, workflow_run: WorkflowRun,
- start_at: float,
total_tokens: int,
total_steps: int,
status: WorkflowRunStatus,
@@ -150,7 +149,6 @@ def _workflow_run_failed(
"""
Workflow run failed
:param workflow_run: workflow run
- :param start_at: start time
:param total_tokens: total tokens
:param total_steps: total steps
:param status: status
@@ -159,7 +157,7 @@ def _workflow_run_failed(
"""
workflow_run.status = status.value
workflow_run.error = error
- workflow_run.elapsed_time = time.perf_counter() - start_at
+ workflow_run.elapsed_time = WorkflowService.get_elapsed_time(workflow_run_id=workflow_run.id)
workflow_run.total_tokens = total_tokens
workflow_run.total_steps = total_steps
workflow_run.finished_at = datetime.now(timezone.utc).replace(tzinfo=None)
@@ -542,7 +540,6 @@ def _handle_workflow_finished(
if isinstance(event, QueueStopEvent):
workflow_run = self._workflow_run_failed(
workflow_run=workflow_run,
- start_at=self._task_state.start_at,
total_tokens=self._task_state.total_tokens,
total_steps=self._task_state.total_steps,
status=WorkflowRunStatus.STOPPED,
@@ -565,7 +562,6 @@ def _handle_workflow_finished(
elif isinstance(event, QueueWorkflowFailedEvent):
workflow_run = self._workflow_run_failed(
workflow_run=workflow_run,
- start_at=self._task_state.start_at,
total_tokens=self._task_state.total_tokens,
total_steps=self._task_state.total_steps,
status=WorkflowRunStatus.FAILED,
@@ -583,7 +579,6 @@ def _handle_workflow_finished(
workflow_run = self._workflow_run_success(
workflow_run=workflow_run,
- start_at=self._task_state.start_at,
total_tokens=self._task_state.total_tokens,
total_steps=self._task_state.total_steps,
outputs=outputs,
diff --git a/api/core/app/task_pipeline/workflow_cycle_state_manager.py b/api/core/app/task_pipeline/workflow_cycle_state_manager.py
index 545f31fddfaedb..8baa8ba09e4b00 100644
--- a/api/core/app/task_pipeline/workflow_cycle_state_manager.py
+++ b/api/core/app/task_pipeline/workflow_cycle_state_manager.py
@@ -2,7 +2,7 @@
from core.app.entities.app_invoke_entities import AdvancedChatAppGenerateEntity, WorkflowAppGenerateEntity
from core.app.entities.task_entities import AdvancedChatTaskState, WorkflowTaskState
-from core.workflow.entities.node_entities import SystemVariable
+from core.workflow.enums import SystemVariable
from models.account import Account
from models.model import EndUser
from models.workflow import Workflow
@@ -13,4 +13,4 @@ class WorkflowCycleStateManager:
_workflow: Workflow
_user: Union[Account, EndUser]
_task_state: Union[AdvancedChatTaskState, WorkflowTaskState]
- _workflow_system_variables: dict[SystemVariable, Any]
\ No newline at end of file
+ _workflow_system_variables: dict[SystemVariable, Any]
diff --git a/api/core/callback_handler/agent_tool_callback_handler.py b/api/core/callback_handler/agent_tool_callback_handler.py
index 03f8244bab212e..578996574739a8 100644
--- a/api/core/callback_handler/agent_tool_callback_handler.py
+++ b/api/core/callback_handler/agent_tool_callback_handler.py
@@ -4,7 +4,8 @@
from pydantic import BaseModel
-from core.ops.ops_trace_manager import TraceQueueManager, TraceTask, TraceTaskName
+from core.ops.entities.trace_entity import TraceTaskName
+from core.ops.ops_trace_manager import TraceQueueManager, TraceTask
from core.tools.entities.tool_entities import ToolInvokeMessage
_TEXT_COLOR_MAPPING = {
diff --git a/api/core/entities/provider_configuration.py b/api/core/entities/provider_configuration.py
index f3cf54a58ed211..778ef2e1ac42ad 100644
--- a/api/core/entities/provider_configuration.py
+++ b/api/core/entities/provider_configuration.py
@@ -8,6 +8,7 @@
from pydantic import BaseModel, ConfigDict
+from constants import HIDDEN_VALUE
from core.entities.model_entities import ModelStatus, ModelWithProviderEntity, SimpleModelProviderEntity
from core.entities.provider_entities import (
CustomConfiguration,
@@ -202,7 +203,7 @@ def custom_credentials_validate(self, credentials: dict) -> tuple[Provider, dict
for key, value in credentials.items():
if key in provider_credential_secret_variables:
# if send [__HIDDEN__] in secret input, it will be same as original value
- if value == '[__HIDDEN__]' and key in original_credentials:
+ if value == HIDDEN_VALUE and key in original_credentials:
credentials[key] = encrypter.decrypt_token(self.tenant_id, original_credentials[key])
credentials = model_provider_factory.provider_credentials_validate(
@@ -345,7 +346,7 @@ def custom_model_credentials_validate(self, model_type: ModelType, model: str, c
for key, value in credentials.items():
if key in provider_credential_secret_variables:
# if send [__HIDDEN__] in secret input, it will be same as original value
- if value == '[__HIDDEN__]' and key in original_credentials:
+ if value == HIDDEN_VALUE and key in original_credentials:
credentials[key] = encrypter.decrypt_token(self.tenant_id, original_credentials[key])
credentials = model_provider_factory.model_credentials_validate(
diff --git a/api/core/file/file_obj.py b/api/core/file/file_obj.py
index 268ef5df867988..3959f4b4a0bb61 100644
--- a/api/core/file/file_obj.py
+++ b/api/core/file/file_obj.py
@@ -1,14 +1,19 @@
import enum
-from typing import Optional
+from typing import Any, Optional
from pydantic import BaseModel
-from core.app.app_config.entities import FileExtraConfig
from core.file.tool_file_parser import ToolFileParser
from core.file.upload_file_parser import UploadFileParser
from core.model_runtime.entities.message_entities import ImagePromptMessageContent
from extensions.ext_database import db
-from models.model import UploadFile
+
+
+class FileExtraConfig(BaseModel):
+ """
+ File Upload Entity.
+ """
+ image_config: Optional[dict[str, Any]] = None
class FileType(enum.Enum):
@@ -114,6 +119,7 @@ def prompt_message_content(self) -> ImagePromptMessageContent:
)
def _get_data(self, force_url: bool = False) -> Optional[str]:
+ from models.model import UploadFile
if self.type == FileType.IMAGE:
if self.transfer_method == FileTransferMethod.REMOTE_URL:
return self.url
diff --git a/api/core/file/message_file_parser.py b/api/core/file/message_file_parser.py
index 7b2f8217f9231f..085ff07cfde921 100644
--- a/api/core/file/message_file_parser.py
+++ b/api/core/file/message_file_parser.py
@@ -1,10 +1,11 @@
+import re
from collections.abc import Mapping, Sequence
from typing import Any, Union
+from urllib.parse import parse_qs, urlparse
import requests
-from core.app.app_config.entities import FileExtraConfig
-from core.file.file_obj import FileBelongsTo, FileTransferMethod, FileType, FileVar
+from core.file.file_obj import FileBelongsTo, FileExtraConfig, FileTransferMethod, FileType, FileVar
from extensions.ext_database import db
from models.account import Account
from models.model import EndUser, MessageFile, UploadFile
@@ -98,7 +99,7 @@ def validate_and_transform_files_arg(self, files: Sequence[Mapping[str, Any]], f
# return all file objs
return new_files
- def transform_message_files(self, files: list[MessageFile], file_extra_config: FileExtraConfig) -> list[FileVar]:
+ def transform_message_files(self, files: list[MessageFile], file_extra_config: FileExtraConfig):
"""
transform message files
@@ -143,7 +144,7 @@ def _to_file_objs(self, files: list[Union[dict, MessageFile]],
return type_file_objs
- def _to_file_obj(self, file: Union[dict, MessageFile], file_extra_config: FileExtraConfig) -> FileVar:
+ def _to_file_obj(self, file: Union[dict, MessageFile], file_extra_config: FileExtraConfig):
"""
transform file to file obj
@@ -186,6 +187,30 @@ def _check_image_remote_url(self, url):
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
}
+ def is_s3_presigned_url(url):
+ try:
+ parsed_url = urlparse(url)
+ if 'amazonaws.com' not in parsed_url.netloc:
+ return False
+ query_params = parse_qs(parsed_url.query)
+ required_params = ['Signature', 'Expires']
+ for param in required_params:
+ if param not in query_params:
+ return False
+ if not query_params['Expires'][0].isdigit():
+ return False
+ signature = query_params['Signature'][0]
+ if not re.match(r'^[A-Za-z0-9+/]+={0,2}$', signature):
+ return False
+ return True
+ except Exception:
+ return False
+
+ if is_s3_presigned_url(url):
+ response = requests.get(url, headers=headers, allow_redirects=True)
+ if response.status_code in {200, 304}:
+ return True, ""
+
response = requests.head(url, headers=headers, allow_redirects=True)
if response.status_code in {200, 304}:
return True, ""
diff --git a/api/core/helper/encrypter.py b/api/core/helper/encrypter.py
index bf87a842c00bf4..5e5deb86b47e54 100644
--- a/api/core/helper/encrypter.py
+++ b/api/core/helper/encrypter.py
@@ -2,7 +2,6 @@
from extensions.ext_database import db
from libs import rsa
-from models.account import Tenant
def obfuscated_token(token: str):
@@ -14,6 +13,7 @@ def obfuscated_token(token: str):
def encrypt_token(tenant_id: str, token: str):
+ from models.account import Tenant
if not (tenant := db.session.query(Tenant).filter(Tenant.id == tenant_id).first()):
raise ValueError(f'Tenant with id {tenant_id} not found')
encrypted_token = rsa.encrypt(token, tenant.encrypt_public_key)
diff --git a/api/core/helper/position_helper.py b/api/core/helper/position_helper.py
index dd1534c791b313..93e3a87124a889 100644
--- a/api/core/helper/position_helper.py
+++ b/api/core/helper/position_helper.py
@@ -3,12 +3,13 @@
from collections.abc import Callable
from typing import Any
+from configs import dify_config
from core.tools.utils.yaml_utils import load_yaml_file
def get_position_map(folder_path: str, *, file_name: str = "_position.yaml") -> dict[str, int]:
"""
- Get the mapping from name to index from a YAML file
+ Get the mapping from name to index from a YAML file.
:param folder_path:
:param file_name: the YAML file name, default to '_position.yaml'
:return: a dict with name as key and index as value
@@ -19,6 +20,64 @@ def get_position_map(folder_path: str, *, file_name: str = "_position.yaml") ->
return {name: index for index, name in enumerate(positions)}
+def get_tool_position_map(folder_path: str, file_name: str = "_position.yaml") -> dict[str, int]:
+ """
+ Get the mapping for tools from name to index from a YAML file.
+ :param folder_path:
+ :param file_name: the YAML file name, default to '_position.yaml'
+ :return: a dict with name as key and index as value
+ """
+ position_map = get_position_map(folder_path, file_name=file_name)
+
+ return sort_and_filter_position_map(
+ position_map,
+ pin_list=dify_config.POSITION_TOOL_PINS_LIST,
+ include_list=dify_config.POSITION_TOOL_INCLUDES_LIST,
+ exclude_list=dify_config.POSITION_TOOL_EXCLUDES_LIST
+ )
+
+
+def get_provider_position_map(folder_path: str, file_name: str = "_position.yaml") -> dict[str, int]:
+ """
+ Get the mapping for providers from name to index from a YAML file.
+ :param folder_path:
+ :param file_name: the YAML file name, default to '_position.yaml'
+ :return: a dict with name as key and index as value
+ """
+ position_map = get_position_map(folder_path, file_name=file_name)
+ return sort_and_filter_position_map(
+ position_map,
+ pin_list=dify_config.POSITION_PROVIDER_PINS_LIST,
+ include_list=dify_config.POSITION_PROVIDER_INCLUDES_LIST,
+ exclude_list=dify_config.POSITION_PROVIDER_EXCLUDES_LIST
+ )
+
+
+def sort_and_filter_position_map(original_position_map: dict[str, int], pin_list: list[str], include_list: list[str], exclude_list: list[str]) -> dict[str, int]:
+ """
+ Sort and filter the positions
+ :param original_position_map: the position map to be sorted and filtered
+ :param pin_list: the list of pins to be put at the beginning
+ :param include_list: the list of names to be included
+ :param exclude_list: the list of names to be excluded
+ :return: the sorted and filtered position map
+ """
+ positions = sorted(original_position_map.keys(), key=lambda x: original_position_map[x])
+ include_set = set(include_list) if include_list else set(positions)
+ exclude_set = set(exclude_list) if exclude_list else set()
+
+ # Add pins to position map
+ position_map = {name: idx for idx, name in enumerate(pin_list) if name in original_position_map}
+
+ # Add remaining positions to position map, respecting include and exclude lists
+ start_idx = len(position_map)
+ for name in positions:
+ if name in include_set and name not in exclude_set and name not in position_map:
+ position_map[name] = start_idx
+ start_idx += 1
+ return position_map
+
+
def sort_by_position_map(
position_map: dict[str, int],
data: list[Any],
@@ -35,7 +94,9 @@ def sort_by_position_map(
if not position_map or not data:
return data
- return sorted(data, key=lambda x: position_map.get(name_func(x), float('inf')))
+ filtered_data = [item for item in data if name_func(item) in position_map]
+
+ return sorted(filtered_data, key=lambda x: position_map.get(name_func(x), float('inf')))
def sort_to_dict_by_position_map(
diff --git a/api/core/llm_generator/llm_generator.py b/api/core/llm_generator/llm_generator.py
index 0b5029460a03ff..8c13b4a45cbe6c 100644
--- a/api/core/llm_generator/llm_generator.py
+++ b/api/core/llm_generator/llm_generator.py
@@ -14,7 +14,8 @@
from core.model_runtime.entities.message_entities import SystemPromptMessage, UserPromptMessage
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.errors.invoke import InvokeAuthorizationError, InvokeError
-from core.ops.ops_trace_manager import TraceQueueManager, TraceTask, TraceTaskName
+from core.ops.entities.trace_entity import TraceTaskName
+from core.ops.ops_trace_manager import TraceQueueManager, TraceTask
from core.ops.utils import measure_time
from core.prompt.utils.prompt_template_parser import PromptTemplateParser
diff --git a/api/core/model_runtime/entities/defaults.py b/api/core/model_runtime/entities/defaults.py
index 87fe4f681ce5c7..d2076bf74a3cde 100644
--- a/api/core/model_runtime/entities/defaults.py
+++ b/api/core/model_runtime/entities/defaults.py
@@ -1,4 +1,3 @@
-
from core.model_runtime.entities.model_entities import DefaultParameterName
PARAMETER_RULE_TEMPLATE: dict[DefaultParameterName, dict] = {
@@ -94,5 +93,16 @@
},
'required': False,
'options': ['JSON', 'XML'],
- }
-}
\ No newline at end of file
+ },
+ DefaultParameterName.JSON_SCHEMA: {
+ 'label': {
+ 'en_US': 'JSON Schema',
+ },
+ 'type': 'text',
+ 'help': {
+ 'en_US': 'Setting a response JSON schema will ensure the LLM adheres to it.',
+ 'zh_Hans': '设置返回的json schema,llm将按照它返回',
+ },
+ 'required': False,
+ },
+}
diff --git a/api/core/model_runtime/entities/model_entities.py b/api/core/model_runtime/entities/model_entities.py
index 3d471787bbef8e..c257ce63d27926 100644
--- a/api/core/model_runtime/entities/model_entities.py
+++ b/api/core/model_runtime/entities/model_entities.py
@@ -95,6 +95,7 @@ class DefaultParameterName(Enum):
FREQUENCY_PENALTY = "frequency_penalty"
MAX_TOKENS = "max_tokens"
RESPONSE_FORMAT = "response_format"
+ JSON_SCHEMA = "json_schema"
@classmethod
def value_of(cls, value: Any) -> 'DefaultParameterName':
@@ -118,6 +119,7 @@ class ParameterType(Enum):
INT = "int"
STRING = "string"
BOOLEAN = "boolean"
+ TEXT = "text"
class ModelPropertyKey(Enum):
diff --git a/api/core/model_runtime/model_providers/__base/ai_model.py b/api/core/model_runtime/model_providers/__base/ai_model.py
index 0de216bf896fc2..716bb63566c372 100644
--- a/api/core/model_runtime/model_providers/__base/ai_model.py
+++ b/api/core/model_runtime/model_providers/__base/ai_model.py
@@ -151,9 +151,9 @@ def predefined_models(self) -> list[AIModelEntity]:
os.path.join(provider_model_type_path, model_schema_yaml)
for model_schema_yaml in os.listdir(provider_model_type_path)
if not model_schema_yaml.startswith('__')
- and not model_schema_yaml.startswith('_')
- and os.path.isfile(os.path.join(provider_model_type_path, model_schema_yaml))
- and model_schema_yaml.endswith('.yaml')
+ and not model_schema_yaml.startswith('_')
+ and os.path.isfile(os.path.join(provider_model_type_path, model_schema_yaml))
+ and model_schema_yaml.endswith('.yaml')
]
# get _position.yaml file path
diff --git a/api/core/model_runtime/model_providers/_position.yaml b/api/core/model_runtime/model_providers/_position.yaml
index b4e024a81ec7bb..d10314ba039e63 100644
--- a/api/core/model_runtime/model_providers/_position.yaml
+++ b/api/core/model_runtime/model_providers/_position.yaml
@@ -36,3 +36,4 @@
- hunyuan
- siliconflow
- perfxcloud
+- zhinao
diff --git a/api/core/model_runtime/model_providers/bedrock/llm/llm.py b/api/core/model_runtime/model_providers/bedrock/llm/llm.py
index ff34a116c78e05..335fa493cded9f 100644
--- a/api/core/model_runtime/model_providers/bedrock/llm/llm.py
+++ b/api/core/model_runtime/model_providers/bedrock/llm/llm.py
@@ -379,8 +379,12 @@ def _convert_prompt_message_to_dict(self, message: PromptMessage) -> dict:
if not message_content.data.startswith("data:"):
# fetch image data from url
try:
- image_content = requests.get(message_content.data).content
- mime_type, _ = mimetypes.guess_type(message_content.data)
+ url = message_content.data
+ image_content = requests.get(url).content
+ if '?' in url:
+ url = url.split('?')[0]
+ mime_type, _ = mimetypes.guess_type(url)
+ base64_data = base64.b64encode(image_content).decode('utf-8')
except Exception as ex:
raise ValueError(f"Failed to fetch image data from url {message_content.data}, {ex}")
else:
diff --git a/api/core/model_runtime/model_providers/huggingface_tei/__init__.py b/api/core/model_runtime/model_providers/huggingface_tei/__init__.py
new file mode 100644
index 00000000000000..e69de29bb2d1d6
diff --git a/api/core/model_runtime/model_providers/huggingface_tei/huggingface_tei.py b/api/core/model_runtime/model_providers/huggingface_tei/huggingface_tei.py
new file mode 100644
index 00000000000000..94544662503974
--- /dev/null
+++ b/api/core/model_runtime/model_providers/huggingface_tei/huggingface_tei.py
@@ -0,0 +1,11 @@
+import logging
+
+from core.model_runtime.model_providers.__base.model_provider import ModelProvider
+
+logger = logging.getLogger(__name__)
+
+
+class HuggingfaceTeiProvider(ModelProvider):
+
+ def validate_provider_credentials(self, credentials: dict) -> None:
+ pass
diff --git a/api/core/model_runtime/model_providers/huggingface_tei/huggingface_tei.yaml b/api/core/model_runtime/model_providers/huggingface_tei/huggingface_tei.yaml
new file mode 100644
index 00000000000000..f3a912d84d23d3
--- /dev/null
+++ b/api/core/model_runtime/model_providers/huggingface_tei/huggingface_tei.yaml
@@ -0,0 +1,36 @@
+provider: huggingface_tei
+label:
+ en_US: Text Embedding Inference
+description:
+ en_US: A blazing fast inference solution for text embeddings models.
+ zh_Hans: 用于文本嵌入模型的超快速推理解决方案。
+background: "#FFF8DC"
+help:
+ title:
+ en_US: How to deploy Text Embedding Inference
+ zh_Hans: 如何部署 Text Embedding Inference
+ url:
+ en_US: https://github.com/huggingface/text-embeddings-inference
+supported_model_types:
+ - text-embedding
+ - rerank
+configurate_methods:
+ - customizable-model
+model_credential_schema:
+ model:
+ label:
+ en_US: Model Name
+ zh_Hans: 模型名称
+ placeholder:
+ en_US: Enter your model name
+ zh_Hans: 输入模型名称
+ credential_form_schemas:
+ - variable: server_url
+ label:
+ zh_Hans: 服务器URL
+ en_US: Server url
+ type: secret-input
+ required: true
+ placeholder:
+ zh_Hans: 在此输入Text Embedding Inference的服务器地址,如 http://192.168.1.100:8080
+ en_US: Enter the url of your Text Embedding Inference, e.g. http://192.168.1.100:8080
diff --git a/api/core/model_runtime/model_providers/huggingface_tei/rerank/__init__.py b/api/core/model_runtime/model_providers/huggingface_tei/rerank/__init__.py
new file mode 100644
index 00000000000000..e69de29bb2d1d6
diff --git a/api/core/model_runtime/model_providers/huggingface_tei/rerank/rerank.py b/api/core/model_runtime/model_providers/huggingface_tei/rerank/rerank.py
new file mode 100644
index 00000000000000..34013426de5b77
--- /dev/null
+++ b/api/core/model_runtime/model_providers/huggingface_tei/rerank/rerank.py
@@ -0,0 +1,137 @@
+from typing import Optional
+
+import httpx
+
+from core.model_runtime.entities.common_entities import I18nObject
+from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelPropertyKey, ModelType
+from core.model_runtime.entities.rerank_entities import RerankDocument, RerankResult
+from core.model_runtime.errors.invoke import (
+ InvokeAuthorizationError,
+ InvokeBadRequestError,
+ InvokeConnectionError,
+ InvokeError,
+ InvokeRateLimitError,
+ InvokeServerUnavailableError,
+)
+from core.model_runtime.errors.validate import CredentialsValidateFailedError
+from core.model_runtime.model_providers.__base.rerank_model import RerankModel
+from core.model_runtime.model_providers.huggingface_tei.tei_helper import TeiHelper
+
+
+class HuggingfaceTeiRerankModel(RerankModel):
+ """
+ Model class for Text Embedding Inference rerank model.
+ """
+
+ def _invoke(
+ self,
+ model: str,
+ credentials: dict,
+ query: str,
+ docs: list[str],
+ score_threshold: Optional[float] = None,
+ top_n: Optional[int] = None,
+ user: Optional[str] = None,
+ ) -> RerankResult:
+ """
+ Invoke rerank model
+
+ :param model: model name
+ :param credentials: model credentials
+ :param query: search query
+ :param docs: docs for reranking
+ :param score_threshold: score threshold
+ :param top_n: top n
+ :param user: unique user id
+ :return: rerank result
+ """
+ if len(docs) == 0:
+ return RerankResult(model=model, docs=[])
+ server_url = credentials['server_url']
+
+ if server_url.endswith('/'):
+ server_url = server_url[:-1]
+
+ try:
+ results = TeiHelper.invoke_rerank(server_url, query, docs)
+
+ rerank_documents = []
+ for result in results:
+ rerank_document = RerankDocument(
+ index=result['index'],
+ text=result['text'],
+ score=result['score'],
+ )
+ if score_threshold is None or result['score'] >= score_threshold:
+ rerank_documents.append(rerank_document)
+ if top_n is not None and len(rerank_documents) >= top_n:
+ break
+
+ return RerankResult(model=model, docs=rerank_documents)
+ except httpx.HTTPStatusError as e:
+ raise InvokeServerUnavailableError(str(e))
+
+ def validate_credentials(self, model: str, credentials: dict) -> None:
+ """
+ Validate model credentials
+
+ :param model: model name
+ :param credentials: model credentials
+ :return:
+ """
+ try:
+ server_url = credentials['server_url']
+ extra_args = TeiHelper.get_tei_extra_parameter(server_url, model)
+ if extra_args.model_type != 'reranker':
+ raise CredentialsValidateFailedError('Current model is not a rerank model')
+
+ credentials['context_size'] = extra_args.max_input_length
+
+ self.invoke(
+ model=model,
+ credentials=credentials,
+ query='Whose kasumi',
+ docs=[
+ 'Kasumi is a girl\'s name of Japanese origin meaning "mist".',
+ 'Her music is a kawaii bass, a mix of future bass, pop, and kawaii music ',
+ 'and she leads a team named PopiParty.',
+ ],
+ score_threshold=0.8,
+ )
+ except Exception as ex:
+ raise CredentialsValidateFailedError(str(ex))
+
+ @property
+ def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]:
+ """
+ Map model invoke error to unified error
+ The key is the error type thrown to the caller
+ The value is the error type thrown by the model,
+ which needs to be converted into a unified error type for the caller.
+
+ :return: Invoke error mapping
+ """
+ return {
+ InvokeConnectionError: [InvokeConnectionError],
+ InvokeServerUnavailableError: [InvokeServerUnavailableError],
+ InvokeRateLimitError: [InvokeRateLimitError],
+ InvokeAuthorizationError: [InvokeAuthorizationError],
+ InvokeBadRequestError: [InvokeBadRequestError, KeyError, ValueError],
+ }
+
+ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None:
+ """
+ used to define customizable model schema
+ """
+ entity = AIModelEntity(
+ model=model,
+ label=I18nObject(en_US=model),
+ fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
+ model_type=ModelType.RERANK,
+ model_properties={
+ ModelPropertyKey.CONTEXT_SIZE: int(credentials.get('context_size', 512)),
+ },
+ parameter_rules=[],
+ )
+
+ return entity
diff --git a/api/core/model_runtime/model_providers/huggingface_tei/tei_helper.py b/api/core/model_runtime/model_providers/huggingface_tei/tei_helper.py
new file mode 100644
index 00000000000000..2aa785c89d27e6
--- /dev/null
+++ b/api/core/model_runtime/model_providers/huggingface_tei/tei_helper.py
@@ -0,0 +1,183 @@
+from threading import Lock
+from time import time
+from typing import Optional
+
+import httpx
+from requests.adapters import HTTPAdapter
+from requests.exceptions import ConnectionError, MissingSchema, Timeout
+from requests.sessions import Session
+from yarl import URL
+
+
+class TeiModelExtraParameter:
+ model_type: str
+ max_input_length: int
+ max_client_batch_size: int
+
+ def __init__(self, model_type: str, max_input_length: int, max_client_batch_size: Optional[int] = None) -> None:
+ self.model_type = model_type
+ self.max_input_length = max_input_length
+ self.max_client_batch_size = max_client_batch_size
+
+
+cache = {}
+cache_lock = Lock()
+
+
+class TeiHelper:
+ @staticmethod
+ def get_tei_extra_parameter(server_url: str, model_name: str) -> TeiModelExtraParameter:
+ TeiHelper._clean_cache()
+ with cache_lock:
+ if model_name not in cache:
+ cache[model_name] = {
+ 'expires': time() + 300,
+ 'value': TeiHelper._get_tei_extra_parameter(server_url),
+ }
+ return cache[model_name]['value']
+
+ @staticmethod
+ def _clean_cache() -> None:
+ try:
+ with cache_lock:
+ expired_keys = [model_uid for model_uid, model in cache.items() if model['expires'] < time()]
+ for model_uid in expired_keys:
+ del cache[model_uid]
+ except RuntimeError:
+ pass
+
+ @staticmethod
+ def _get_tei_extra_parameter(server_url: str) -> TeiModelExtraParameter:
+ """
+ get tei model extra parameter like model_type, max_input_length, max_client_batch_size
+ """
+
+ url = str(URL(server_url) / 'info')
+
+ # this method is surrounded by a lock, and default requests may hang forever, so we just set a Adapter with max_retries=3
+ session = Session()
+ session.mount('http://', HTTPAdapter(max_retries=3))
+ session.mount('https://', HTTPAdapter(max_retries=3))
+
+ try:
+ response = session.get(url, timeout=10)
+ except (MissingSchema, ConnectionError, Timeout) as e:
+ raise RuntimeError(f'get tei model extra parameter failed, url: {url}, error: {e}')
+ if response.status_code != 200:
+ raise RuntimeError(
+ f'get tei model extra parameter failed, status code: {response.status_code}, response: {response.text}'
+ )
+
+ response_json = response.json()
+
+ model_type = response_json.get('model_type', {})
+ if len(model_type.keys()) < 1:
+ raise RuntimeError('model_type is empty')
+ model_type = list(model_type.keys())[0]
+ if model_type not in ['embedding', 'reranker']:
+ raise RuntimeError(f'invalid model_type: {model_type}')
+
+ max_input_length = response_json.get('max_input_length', 512)
+ max_client_batch_size = response_json.get('max_client_batch_size', 1)
+
+ return TeiModelExtraParameter(
+ model_type=model_type,
+ max_input_length=max_input_length,
+ max_client_batch_size=max_client_batch_size
+ )
+
+ @staticmethod
+ def invoke_tokenize(server_url: str, texts: list[str]) -> list[list[dict]]:
+ """
+ Invoke tokenize endpoint
+
+ Example response:
+ [
+ [
+ {
+ "id": 0,
+ "text": "",
+ "special": true,
+ "start": null,
+ "stop": null
+ },
+ {
+ "id": 7704,
+ "text": "str",
+ "special": false,
+ "start": 0,
+ "stop": 3
+ },
+ < MORE TOKENS >
+ ]
+ ]
+
+ :param server_url: server url
+ :param texts: texts to tokenize
+ """
+ resp = httpx.post(
+ f'{server_url}/tokenize',
+ json={'inputs': texts},
+ )
+ resp.raise_for_status()
+ return resp.json()
+
+ @staticmethod
+ def invoke_embeddings(server_url: str, texts: list[str]) -> dict:
+ """
+ Invoke embeddings endpoint
+
+ Example response:
+ {
+ "object": "list",
+ "data": [
+ {
+ "object": "embedding",
+ "embedding": [...],
+ "index": 0
+ }
+ ],
+ "model": "MODEL_NAME",
+ "usage": {
+ "prompt_tokens": 3,
+ "total_tokens": 3
+ }
+ }
+
+ :param server_url: server url
+ :param texts: texts to embed
+ """
+ # Use OpenAI compatible API here, which has usage tracking
+ resp = httpx.post(
+ f'{server_url}/v1/embeddings',
+ json={'input': texts},
+ )
+ resp.raise_for_status()
+ return resp.json()
+
+ @staticmethod
+ def invoke_rerank(server_url: str, query: str, docs: list[str]) -> list[dict]:
+ """
+ Invoke rerank endpoint
+
+ Example response:
+ [
+ {
+ "index": 0,
+ "text": "Deep Learning is ...",
+ "score": 0.9950755
+ }
+ ]
+
+ :param server_url: server url
+ :param query: the query to rank the documents against
+ :param docs: the documents to rerank
+ """
+ params = {'query': query, 'texts': docs, 'return_text': True}
+
+ response = httpx.post(
+ server_url + '/rerank',
+ json=params,
+ )
+ response.raise_for_status()
+ return response.json()
diff --git a/api/core/model_runtime/model_providers/huggingface_tei/text_embedding/__init__.py b/api/core/model_runtime/model_providers/huggingface_tei/text_embedding/__init__.py
new file mode 100644
index 00000000000000..e69de29bb2d1d6
diff --git a/api/core/model_runtime/model_providers/huggingface_tei/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/huggingface_tei/text_embedding/text_embedding.py
new file mode 100644
index 00000000000000..6897b87f6d7525
--- /dev/null
+++ b/api/core/model_runtime/model_providers/huggingface_tei/text_embedding/text_embedding.py
@@ -0,0 +1,204 @@
+import time
+from typing import Optional
+
+from core.model_runtime.entities.common_entities import I18nObject
+from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelPropertyKey, ModelType, PriceType
+from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult
+from core.model_runtime.errors.invoke import (
+ InvokeAuthorizationError,
+ InvokeBadRequestError,
+ InvokeConnectionError,
+ InvokeError,
+ InvokeRateLimitError,
+ InvokeServerUnavailableError,
+)
+from core.model_runtime.errors.validate import CredentialsValidateFailedError
+from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel
+from core.model_runtime.model_providers.huggingface_tei.tei_helper import TeiHelper
+
+
+class HuggingfaceTeiTextEmbeddingModel(TextEmbeddingModel):
+ """
+ Model class for Text Embedding Inference text embedding model.
+ """
+
+ def _invoke(
+ self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None
+ ) -> TextEmbeddingResult:
+ """
+ Invoke text embedding model
+
+ credentials should be like:
+ {
+ 'server_url': 'server url',
+ 'model_uid': 'model uid',
+ }
+
+ :param model: model name
+ :param credentials: model credentials
+ :param texts: texts to embed
+ :param user: unique user id
+ :return: embeddings result
+ """
+ server_url = credentials['server_url']
+
+ if server_url.endswith('/'):
+ server_url = server_url[:-1]
+
+
+ # get model properties
+ context_size = self._get_context_size(model, credentials)
+ max_chunks = self._get_max_chunks(model, credentials)
+
+ inputs = []
+ indices = []
+ used_tokens = 0
+
+ # get tokenized results from TEI
+ batched_tokenize_result = TeiHelper.invoke_tokenize(server_url, texts)
+
+ for i, (text, tokenize_result) in enumerate(zip(texts, batched_tokenize_result)):
+
+ # Check if the number of tokens is larger than the context size
+ num_tokens = len(tokenize_result)
+
+ if num_tokens >= context_size:
+ # Find the best cutoff point
+ pre_special_token_count = 0
+ for token in tokenize_result:
+ if token['special']:
+ pre_special_token_count += 1
+ else:
+ break
+ rest_special_token_count = len([token for token in tokenize_result if token['special']]) - pre_special_token_count
+
+ # Calculate the cutoff point, leave 20 extra space to avoid exceeding the limit
+ token_cutoff = context_size - rest_special_token_count - 20
+
+ # Find the cutoff index
+ cutpoint_token = tokenize_result[token_cutoff]
+ cutoff = cutpoint_token['start']
+
+ inputs.append(text[0: cutoff])
+ else:
+ inputs.append(text)
+ indices += [i]
+
+ batched_embeddings = []
+ _iter = range(0, len(inputs), max_chunks)
+
+ try:
+ used_tokens = 0
+ for i in _iter:
+ iter_texts = inputs[i : i + max_chunks]
+ results = TeiHelper.invoke_embeddings(server_url, iter_texts)
+ embeddings = results['data']
+ embeddings = [embedding['embedding'] for embedding in embeddings]
+ batched_embeddings.extend(embeddings)
+
+ usage = results['usage']
+ used_tokens += usage['total_tokens']
+ except RuntimeError as e:
+ raise InvokeServerUnavailableError(str(e))
+
+ usage = self._calc_response_usage(model=model, credentials=credentials, tokens=used_tokens)
+
+ result = TextEmbeddingResult(model=model, embeddings=batched_embeddings, usage=usage)
+
+ return result
+
+ def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int:
+ """
+ Get number of tokens for given prompt messages
+
+ :param model: model name
+ :param credentials: model credentials
+ :param texts: texts to embed
+ :return:
+ """
+ num_tokens = 0
+ server_url = credentials['server_url']
+
+ if server_url.endswith('/'):
+ server_url = server_url[:-1]
+
+ batch_tokens = TeiHelper.invoke_tokenize(server_url, texts)
+ num_tokens = sum(len(tokens) for tokens in batch_tokens)
+ return num_tokens
+
+ def validate_credentials(self, model: str, credentials: dict) -> None:
+ """
+ Validate model credentials
+
+ :param model: model name
+ :param credentials: model credentials
+ :return:
+ """
+ try:
+ server_url = credentials['server_url']
+ extra_args = TeiHelper.get_tei_extra_parameter(server_url, model)
+ # NOTE(review): removed stray debug print of extra_args
+ if extra_args.model_type != 'embedding':
+ raise CredentialsValidateFailedError('Current model is not an embedding model')
+
+ credentials['context_size'] = extra_args.max_input_length
+ credentials['max_chunks'] = extra_args.max_client_batch_size
+ self._invoke(model=model, credentials=credentials, texts=['ping'])
+ except Exception as ex:
+ raise CredentialsValidateFailedError(str(ex))
+
+ @property
+ def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]:
+ return {
+ InvokeConnectionError: [InvokeConnectionError],
+ InvokeServerUnavailableError: [InvokeServerUnavailableError],
+ InvokeRateLimitError: [InvokeRateLimitError],
+ InvokeAuthorizationError: [InvokeAuthorizationError],
+ InvokeBadRequestError: [KeyError],
+ }
+
+ def _calc_response_usage(self, model: str, credentials: dict, tokens: int) -> EmbeddingUsage:
+ """
+ Calculate response usage
+
+ :param model: model name
+ :param credentials: model credentials
+ :param tokens: input tokens
+ :return: usage
+ """
+ # get input price info
+ input_price_info = self.get_price(
+ model=model, credentials=credentials, price_type=PriceType.INPUT, tokens=tokens
+ )
+
+ # transform usage
+ usage = EmbeddingUsage(
+ tokens=tokens,
+ total_tokens=tokens,
+ unit_price=input_price_info.unit_price,
+ price_unit=input_price_info.unit,
+ total_price=input_price_info.total_amount,
+ currency=input_price_info.currency,
+ latency=time.perf_counter() - self.started_at,
+ )
+
+ return usage
+
+ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None:
+ """
+ used to define customizable model schema
+ """
+
+ entity = AIModelEntity(
+ model=model,
+ label=I18nObject(en_US=model),
+ fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
+ model_type=ModelType.TEXT_EMBEDDING,
+ model_properties={
+ ModelPropertyKey.MAX_CHUNKS: int(credentials.get('max_chunks', 1)),
+ ModelPropertyKey.CONTEXT_SIZE: int(credentials.get('context_size', 512)),
+ },
+ parameter_rules=[],
+ )
+
+ return entity
diff --git a/api/core/model_runtime/model_providers/hunyuan/llm/llm.py b/api/core/model_runtime/model_providers/hunyuan/llm/llm.py
index 6d22f9d2d66622..0bdf6ec005056b 100644
--- a/api/core/model_runtime/model_providers/hunyuan/llm/llm.py
+++ b/api/core/model_runtime/model_providers/hunyuan/llm/llm.py
@@ -214,7 +214,7 @@ def _handle_stream_chat_response(self, model, credentials, prompt_messages, resp
def _handle_chat_response(self, credentials, model, prompt_messages, response):
usage = self._calc_response_usage(model, credentials, response.Usage.PromptTokens,
response.Usage.CompletionTokens)
- assistant_prompt_message = PromptMessage(role="assistant")
+ assistant_prompt_message = AssistantPromptMessage()
assistant_prompt_message.content = response.Choices[0].Message.Content
result = LLMResult(
model=model,
diff --git a/api/core/model_runtime/model_providers/jina/rerank/jina-reranker-v2-base-multilingual.yaml b/api/core/model_runtime/model_providers/jina/rerank/jina-reranker-v2-base-multilingual.yaml
index acf576719c03c2..e6af62107eaa08 100644
--- a/api/core/model_runtime/model_providers/jina/rerank/jina-reranker-v2-base-multilingual.yaml
+++ b/api/core/model_runtime/model_providers/jina/rerank/jina-reranker-v2-base-multilingual.yaml
@@ -1,4 +1,4 @@
model: jina-reranker-v2-base-multilingual
model_type: rerank
model_properties:
- context_size: 8192
+ context_size: 1024
diff --git a/api/core/model_runtime/model_providers/model_provider_factory.py b/api/core/model_runtime/model_providers/model_provider_factory.py
index b1660afafb12e4..e2d17e32575920 100644
--- a/api/core/model_runtime/model_providers/model_provider_factory.py
+++ b/api/core/model_runtime/model_providers/model_provider_factory.py
@@ -6,7 +6,7 @@
from pydantic import BaseModel, ConfigDict
from core.helper.module_import_helper import load_single_subclass_from_source
-from core.helper.position_helper import get_position_map, sort_to_dict_by_position_map
+from core.helper.position_helper import get_provider_position_map, sort_to_dict_by_position_map
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.entities.provider_entities import ProviderConfig, ProviderEntity, SimpleProviderEntity
from core.model_runtime.model_providers.__base.model_provider import ModelProvider
@@ -234,7 +234,7 @@ def _get_model_provider_map(self) -> dict[str, ModelProviderExtension]:
]
# get _position.yaml file path
- position_map = get_position_map(model_providers_path)
+ position_map = get_provider_position_map(model_providers_path)
# traverse all model_provider_dir_paths
model_providers: list[ModelProviderExtension] = []
diff --git a/api/core/model_runtime/model_providers/ollama/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/ollama/text_embedding/text_embedding.py
index 069de9acec0a92..9e26d35afc9437 100644
--- a/api/core/model_runtime/model_providers/ollama/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/ollama/text_embedding/text_embedding.py
@@ -72,7 +72,7 @@ def _invoke(self, model: str, credentials: dict,
num_tokens = self._get_num_tokens_by_gpt2(text)
if num_tokens >= context_size:
- cutoff = int(len(text) * (np.floor(context_size / num_tokens)))
+ cutoff = int(np.floor(len(text) * (context_size / num_tokens)))
# if num tokens is larger than context length, only use the start
inputs.append(text[0: cutoff])
else:
diff --git a/api/core/model_runtime/model_providers/openai/llm/_position.yaml b/api/core/model_runtime/model_providers/openai/llm/_position.yaml
index 91b9215829b00b..ac7313aaa1bf0b 100644
--- a/api/core/model_runtime/model_providers/openai/llm/_position.yaml
+++ b/api/core/model_runtime/model_providers/openai/llm/_position.yaml
@@ -1,6 +1,8 @@
- gpt-4
- gpt-4o
- gpt-4o-2024-05-13
+- gpt-4o-2024-08-06
+- chatgpt-4o-latest
- gpt-4o-mini
- gpt-4o-mini-2024-07-18
- gpt-4-turbo
diff --git a/api/core/model_runtime/model_providers/openai/llm/chatgpt-4o-latest.yaml b/api/core/model_runtime/model_providers/openai/llm/chatgpt-4o-latest.yaml
new file mode 100644
index 00000000000000..98e236650c9e73
--- /dev/null
+++ b/api/core/model_runtime/model_providers/openai/llm/chatgpt-4o-latest.yaml
@@ -0,0 +1,44 @@
+model: chatgpt-4o-latest
+label:
+ zh_Hans: chatgpt-4o-latest
+ en_US: chatgpt-4o-latest
+model_type: llm
+features:
+ - multi-tool-call
+ - agent-thought
+ - stream-tool-call
+ - vision
+model_properties:
+ mode: chat
+ context_size: 128000
+parameter_rules:
+ - name: temperature
+ use_template: temperature
+ - name: top_p
+ use_template: top_p
+ - name: presence_penalty
+ use_template: presence_penalty
+ - name: frequency_penalty
+ use_template: frequency_penalty
+ - name: max_tokens
+ use_template: max_tokens
+ default: 512
+ min: 1
+ max: 16384
+ - name: response_format
+ label:
+ zh_Hans: 回复格式
+ en_US: response_format
+ type: string
+ help:
+ zh_Hans: 指定模型必须输出的格式
+ en_US: specifying the format that the model must output
+ required: false
+ options:
+ - text
+ - json_object
+pricing:
+ input: '2.50'
+ output: '10.00'
+ unit: '0.000001'
+ currency: USD
diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-4o-2024-08-06.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-4o-2024-08-06.yaml
new file mode 100644
index 00000000000000..7e430c51a710fc
--- /dev/null
+++ b/api/core/model_runtime/model_providers/openai/llm/gpt-4o-2024-08-06.yaml
@@ -0,0 +1,47 @@
+model: gpt-4o-2024-08-06
+label:
+ zh_Hans: gpt-4o-2024-08-06
+ en_US: gpt-4o-2024-08-06
+model_type: llm
+features:
+ - multi-tool-call
+ - agent-thought
+ - stream-tool-call
+ - vision
+model_properties:
+ mode: chat
+ context_size: 128000
+parameter_rules:
+ - name: temperature
+ use_template: temperature
+ - name: top_p
+ use_template: top_p
+ - name: presence_penalty
+ use_template: presence_penalty
+ - name: frequency_penalty
+ use_template: frequency_penalty
+ - name: max_tokens
+ use_template: max_tokens
+ default: 512
+ min: 1
+ max: 16384
+ - name: response_format
+ label:
+ zh_Hans: 回复格式
+ en_US: response_format
+ type: string
+ help:
+ zh_Hans: 指定模型必须输出的格式
+ en_US: specifying the format that the model must output
+ required: false
+ options:
+ - text
+ - json_object
+ - json_schema
+ - name: json_schema
+ use_template: json_schema
+pricing:
+ input: '2.50'
+ output: '10.00'
+ unit: '0.000001'
+ currency: USD
diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-4o-mini.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-4o-mini.yaml
index b97fbf8aabcae4..23dcf85085e123 100644
--- a/api/core/model_runtime/model_providers/openai/llm/gpt-4o-mini.yaml
+++ b/api/core/model_runtime/model_providers/openai/llm/gpt-4o-mini.yaml
@@ -37,6 +37,9 @@ parameter_rules:
options:
- text
- json_object
+ - json_schema
+ - name: json_schema
+ use_template: json_schema
pricing:
input: '0.15'
output: '0.60'
diff --git a/api/core/model_runtime/model_providers/openai/llm/llm.py b/api/core/model_runtime/model_providers/openai/llm/llm.py
index aae2729bdfb042..06135c958463e8 100644
--- a/api/core/model_runtime/model_providers/openai/llm/llm.py
+++ b/api/core/model_runtime/model_providers/openai/llm/llm.py
@@ -1,3 +1,4 @@
+import json
import logging
from collections.abc import Generator
from typing import Optional, Union, cast
@@ -544,13 +545,18 @@ def _chat_generate(self, model: str, credentials: dict,
response_format = model_parameters.get("response_format")
if response_format:
- if response_format == "json_object":
- response_format = {"type": "json_object"}
+ if response_format == "json_schema":
+ json_schema = model_parameters.get("json_schema")
+ if not json_schema:
+ raise ValueError("Must define JSON Schema when the response format is json_schema")
+ try:
+ schema = json.loads(json_schema)
+ except:
+                        raise ValueError(f"not correct json_schema format: {json_schema}")
+ model_parameters.pop("json_schema")
+ model_parameters["response_format"] = {"type": "json_schema", "json_schema": schema}
else:
- response_format = {"type": "text"}
-
- model_parameters["response_format"] = response_format
-
+ model_parameters["response_format"] = {"type": response_format}
extra_model_kwargs = {}
@@ -922,11 +928,14 @@ def _num_tokens_from_messages(self, model: str, messages: list[PromptMessage],
tools: Optional[list[PromptMessageTool]] = None) -> int:
"""Calculate num tokens for gpt-3.5-turbo and gpt-4 with tiktoken package.
- Official documentation: https://github.com/openai/openai-cookbook/blob/
- main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb"""
+ Official documentation: https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb"""
if model.startswith('ft:'):
model = model.split(':')[1]
+        # Currently, we can use gpt-4o's encoding to count chatgpt-4o-latest's tokens.
+ if model == "chatgpt-4o-latest":
+ model = "gpt-4o"
+
try:
encoding = tiktoken.encoding_for_model(model)
except KeyError:
@@ -946,7 +955,7 @@ def _num_tokens_from_messages(self, model: str, messages: list[PromptMessage],
raise NotImplementedError(
f"get_num_tokens_from_messages() is not presently implemented "
f"for model {model}."
- "See https://github.com/openai/openai-python/blob/main/chatml.md for "
+ "See https://platform.openai.com/docs/advanced-usage/managing-tokens for "
"information on how messages are converted to tokens."
)
num_tokens = 0
diff --git a/api/core/model_runtime/model_providers/openai_api_compatible/openai_api_compatible.yaml b/api/core/model_runtime/model_providers/openai_api_compatible/openai_api_compatible.yaml
index 69bed9603902a6..88c76fe16ef733 100644
--- a/api/core/model_runtime/model_providers/openai_api_compatible/openai_api_compatible.yaml
+++ b/api/core/model_runtime/model_providers/openai_api_compatible/openai_api_compatible.yaml
@@ -7,6 +7,7 @@ description:
supported_model_types:
- llm
- text-embedding
+ - speech2text
configurate_methods:
- customizable-model
model_credential_schema:
@@ -61,6 +62,22 @@ model_credential_schema:
zh_Hans: 模型上下文长度
en_US: Model context size
required: true
+ show_on:
+ - variable: __model_type
+ value: llm
+ type: text-input
+ default: '4096'
+ placeholder:
+ zh_Hans: 在此输入您的模型上下文长度
+ en_US: Enter your Model context size
+ - variable: context_size
+ label:
+ zh_Hans: 模型上下文长度
+ en_US: Model context size
+ required: true
+ show_on:
+ - variable: __model_type
+ value: text-embedding
type: text-input
default: '4096'
placeholder:
diff --git a/api/core/model_runtime/model_providers/openai_api_compatible/speech2text/__init__.py b/api/core/model_runtime/model_providers/openai_api_compatible/speech2text/__init__.py
new file mode 100644
index 00000000000000..e69de29bb2d1d6
diff --git a/api/core/model_runtime/model_providers/openai_api_compatible/speech2text/speech2text.py b/api/core/model_runtime/model_providers/openai_api_compatible/speech2text/speech2text.py
new file mode 100644
index 00000000000000..00702ba9367cf4
--- /dev/null
+++ b/api/core/model_runtime/model_providers/openai_api_compatible/speech2text/speech2text.py
@@ -0,0 +1,63 @@
+from typing import IO, Optional
+from urllib.parse import urljoin
+
+import requests
+
+from core.model_runtime.errors.invoke import InvokeBadRequestError
+from core.model_runtime.errors.validate import CredentialsValidateFailedError
+from core.model_runtime.model_providers.__base.speech2text_model import Speech2TextModel
+from core.model_runtime.model_providers.openai_api_compatible._common import _CommonOAI_API_Compat
+
+
+class OAICompatSpeech2TextModel(_CommonOAI_API_Compat, Speech2TextModel):
+ """
+ Model class for OpenAI Compatible Speech to text model.
+ """
+
+ def _invoke(
+ self, model: str, credentials: dict, file: IO[bytes], user: Optional[str] = None
+ ) -> str:
+ """
+ Invoke speech2text model
+
+ :param model: model name
+ :param credentials: model credentials
+ :param file: audio file
+ :param user: unique user id
+ :return: text for given audio file
+ """
+ headers = {}
+
+ api_key = credentials.get("api_key")
+ if api_key:
+ headers["Authorization"] = f"Bearer {api_key}"
+
+ endpoint_url = credentials.get("endpoint_url")
+ if not endpoint_url.endswith("/"):
+ endpoint_url += "/"
+ endpoint_url = urljoin(endpoint_url, "audio/transcriptions")
+
+ payload = {"model": model}
+ files = [("file", file)]
+ response = requests.post(endpoint_url, headers=headers, data=payload, files=files)
+
+ if response.status_code != 200:
+ raise InvokeBadRequestError(response.text)
+ response_data = response.json()
+ return response_data["text"]
+
+ def validate_credentials(self, model: str, credentials: dict) -> None:
+ """
+ Validate model credentials
+
+ :param model: model name
+ :param credentials: model credentials
+ :return:
+ """
+ try:
+ audio_file_path = self._get_demo_file_path()
+
+ with open(audio_file_path, "rb") as audio_file:
+ self._invoke(model, credentials, audio_file)
+ except Exception as ex:
+ raise CredentialsValidateFailedError(str(ex))
diff --git a/api/core/model_runtime/model_providers/openai_api_compatible/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/openai_api_compatible/text_embedding/text_embedding.py
index 3467cd6dfd97f9..363054b084a69c 100644
--- a/api/core/model_runtime/model_providers/openai_api_compatible/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/openai_api_compatible/text_embedding/text_embedding.py
@@ -76,7 +76,7 @@ def _invoke(self, model: str, credentials: dict,
num_tokens = self._get_num_tokens_by_gpt2(text)
if num_tokens >= context_size:
- cutoff = int(len(text) * (np.floor(context_size / num_tokens)))
+ cutoff = int(np.floor(len(text) * (context_size / num_tokens)))
# if num tokens is larger than context length, only use the start
inputs.append(text[0: cutoff])
else:
diff --git a/api/core/model_runtime/model_providers/perfxcloud/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/perfxcloud/text_embedding/text_embedding.py
index 5a99ad301f36fa..11d57e3749a8f1 100644
--- a/api/core/model_runtime/model_providers/perfxcloud/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/perfxcloud/text_embedding/text_embedding.py
@@ -79,7 +79,7 @@ def _invoke(self, model: str, credentials: dict,
num_tokens = self._get_num_tokens_by_gpt2(text)
if num_tokens >= context_size:
- cutoff = int(len(text) * (np.floor(context_size / num_tokens)))
+ cutoff = int(np.floor(len(text) * (context_size / num_tokens)))
# if num tokens is larger than context length, only use the start
inputs.append(text[0: cutoff])
else:
diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/_position.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/_position.yaml
index 20bb0790c2234e..c2f0eb05360327 100644
--- a/api/core/model_runtime/model_providers/siliconflow/llm/_position.yaml
+++ b/api/core/model_runtime/model_providers/siliconflow/llm/_position.yaml
@@ -1,8 +1,20 @@
-- deepseek-v2-chat
-- qwen2-72b-instruct
-- qwen2-57b-a14b-instruct
-- qwen2-7b-instruct
-- yi-1.5-34b-chat
-- yi-1.5-9b-chat
-- yi-1.5-6b-chat
-- glm4-9B-chat
+- Qwen/Qwen2-72B-Instruct
+- Qwen/Qwen2-57B-A14B-Instruct
+- Qwen/Qwen2-7B-Instruct
+- Qwen/Qwen2-1.5B-Instruct
+- 01-ai/Yi-1.5-34B-Chat
+- 01-ai/Yi-1.5-9B-Chat-16K
+- 01-ai/Yi-1.5-6B-Chat
+- THUDM/glm-4-9b-chat
+- deepseek-ai/DeepSeek-V2-Chat
+- deepseek-ai/DeepSeek-Coder-V2-Instruct
+- internlm/internlm2_5-7b-chat
+- google/gemma-2-27b-it
+- google/gemma-2-9b-it
+- meta-llama/Meta-Llama-3-70B-Instruct
+- meta-llama/Meta-Llama-3-8B-Instruct
+- meta-llama/Meta-Llama-3.1-405B-Instruct
+- meta-llama/Meta-Llama-3.1-70B-Instruct
+- meta-llama/Meta-Llama-3.1-8B-Instruct
+- mistralai/Mixtral-8x7B-Instruct-v0.1
+- mistralai/Mistral-7B-Instruct-v0.2
diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/deepseek-v2-chat.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/deepseek-v2-chat.yaml
index 3926568db6f2e9..caa6508b5ed2a2 100644
--- a/api/core/model_runtime/model_providers/siliconflow/llm/deepseek-v2-chat.yaml
+++ b/api/core/model_runtime/model_providers/siliconflow/llm/deepseek-v2-chat.yaml
@@ -1,4 +1,4 @@
-model: deepseek-ai/deepseek-v2-chat
+model: deepseek-ai/DeepSeek-V2-Chat
label:
en_US: deepseek-ai/DeepSeek-V2-Chat
model_type: llm
diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/gemma-2-27b-it.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/gemma-2-27b-it.yaml
new file mode 100644
index 00000000000000..2840e3dcf4b113
--- /dev/null
+++ b/api/core/model_runtime/model_providers/siliconflow/llm/gemma-2-27b-it.yaml
@@ -0,0 +1,30 @@
+model: google/gemma-2-27b-it
+label:
+ en_US: google/gemma-2-27b-it
+model_type: llm
+features:
+ - agent-thought
+model_properties:
+ mode: chat
+ context_size: 8196
+parameter_rules:
+ - name: temperature
+ use_template: temperature
+ - name: max_tokens
+ use_template: max_tokens
+ type: int
+ default: 512
+ min: 1
+ max: 4096
+ help:
+ zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。
+ en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter.
+ - name: top_p
+ use_template: top_p
+ - name: frequency_penalty
+ use_template: frequency_penalty
+pricing:
+ input: '1.26'
+ output: '1.26'
+ unit: '0.000001'
+ currency: RMB
diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/gemma-2-9b-it.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/gemma-2-9b-it.yaml
new file mode 100644
index 00000000000000..d7e19b46f6d6f2
--- /dev/null
+++ b/api/core/model_runtime/model_providers/siliconflow/llm/gemma-2-9b-it.yaml
@@ -0,0 +1,30 @@
+model: google/gemma-2-9b-it
+label:
+ en_US: google/gemma-2-9b-it
+model_type: llm
+features:
+ - agent-thought
+model_properties:
+ mode: chat
+ context_size: 8196
+parameter_rules:
+ - name: temperature
+ use_template: temperature
+ - name: max_tokens
+ use_template: max_tokens
+ type: int
+ default: 512
+ min: 1
+ max: 4096
+ help:
+ zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。
+ en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter.
+ - name: top_p
+ use_template: top_p
+ - name: frequency_penalty
+ use_template: frequency_penalty
+pricing:
+ input: '0'
+ output: '0'
+ unit: '0.000001'
+ currency: RMB
diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/glm4-9b-chat.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/glm4-9b-chat.yaml
index d6a4b21b66d968..9b32a024774d06 100644
--- a/api/core/model_runtime/model_providers/siliconflow/llm/glm4-9b-chat.yaml
+++ b/api/core/model_runtime/model_providers/siliconflow/llm/glm4-9b-chat.yaml
@@ -1,4 +1,4 @@
-model: zhipuai/glm4-9B-chat
+model: THUDM/glm-4-9b-chat
label:
en_US: THUDM/glm-4-9b-chat
model_type: llm
@@ -24,7 +24,7 @@ parameter_rules:
- name: frequency_penalty
use_template: frequency_penalty
pricing:
- input: '0.6'
- output: '0.6'
+ input: '0'
+ output: '0'
unit: '0.000001'
currency: RMB
diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/internlm2_5-7b-chat.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/internlm2_5-7b-chat.yaml
new file mode 100644
index 00000000000000..73ad4480aa2968
--- /dev/null
+++ b/api/core/model_runtime/model_providers/siliconflow/llm/internlm2_5-7b-chat.yaml
@@ -0,0 +1,30 @@
+model: internlm/internlm2_5-7b-chat
+label:
+ en_US: internlm/internlm2_5-7b-chat
+model_type: llm
+features:
+ - agent-thought
+model_properties:
+ mode: chat
+ context_size: 32768
+parameter_rules:
+ - name: temperature
+ use_template: temperature
+ - name: max_tokens
+ use_template: max_tokens
+ type: int
+ default: 512
+ min: 1
+ max: 4096
+ help:
+ zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。
+ en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter.
+ - name: top_p
+ use_template: top_p
+ - name: frequency_penalty
+ use_template: frequency_penalty
+pricing:
+ input: '0'
+ output: '0'
+ unit: '0.000001'
+ currency: RMB
diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/meta-mlama-3-70b-instruct.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/meta-mlama-3-70b-instruct.yaml
new file mode 100644
index 00000000000000..9993d781ac8959
--- /dev/null
+++ b/api/core/model_runtime/model_providers/siliconflow/llm/meta-mlama-3-70b-instruct.yaml
@@ -0,0 +1,30 @@
+model: meta-llama/Meta-Llama-3-70B-Instruct
+label:
+ en_US: meta-llama/Meta-Llama-3-70B-Instruct
+model_type: llm
+features:
+ - agent-thought
+model_properties:
+ mode: chat
+ context_size: 32768
+parameter_rules:
+ - name: temperature
+ use_template: temperature
+ - name: max_tokens
+ use_template: max_tokens
+ type: int
+ default: 512
+ min: 1
+ max: 4096
+ help:
+ zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。
+ en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter.
+ - name: top_p
+ use_template: top_p
+ - name: frequency_penalty
+ use_template: frequency_penalty
+pricing:
+ input: '4.13'
+ output: '4.13'
+ unit: '0.000001'
+ currency: RMB
diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/meta-mlama-3-8b-instruct.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/meta-mlama-3-8b-instruct.yaml
new file mode 100644
index 00000000000000..60e3764789e1f5
--- /dev/null
+++ b/api/core/model_runtime/model_providers/siliconflow/llm/meta-mlama-3-8b-instruct.yaml
@@ -0,0 +1,30 @@
+model: meta-llama/Meta-Llama-3-8B-Instruct
+label:
+ en_US: meta-llama/Meta-Llama-3-8B-Instruct
+model_type: llm
+features:
+ - agent-thought
+model_properties:
+ mode: chat
+ context_size: 8192
+parameter_rules:
+ - name: temperature
+ use_template: temperature
+ - name: max_tokens
+ use_template: max_tokens
+ type: int
+ default: 512
+ min: 1
+ max: 4096
+ help:
+ zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。
+ en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter.
+ - name: top_p
+ use_template: top_p
+ - name: frequency_penalty
+ use_template: frequency_penalty
+pricing:
+ input: '0'
+ output: '0'
+ unit: '0.000001'
+ currency: RMB
diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/meta-mlama-3.1-405b-instruct.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/meta-mlama-3.1-405b-instruct.yaml
new file mode 100644
index 00000000000000..f992660aa2e66f
--- /dev/null
+++ b/api/core/model_runtime/model_providers/siliconflow/llm/meta-mlama-3.1-405b-instruct.yaml
@@ -0,0 +1,30 @@
+model: meta-llama/Meta-Llama-3.1-405B-Instruct
+label:
+ en_US: meta-llama/Meta-Llama-3.1-405B-Instruct
+model_type: llm
+features:
+ - agent-thought
+model_properties:
+ mode: chat
+ context_size: 32768
+parameter_rules:
+ - name: temperature
+ use_template: temperature
+ - name: max_tokens
+ use_template: max_tokens
+ type: int
+ default: 512
+ min: 1
+ max: 4096
+ help:
+ zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。
+ en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter.
+ - name: top_p
+ use_template: top_p
+ - name: frequency_penalty
+ use_template: frequency_penalty
+pricing:
+ input: '21'
+ output: '21'
+ unit: '0.000001'
+ currency: RMB
diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/meta-mlama-3.1-70b-instruct.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/meta-mlama-3.1-70b-instruct.yaml
new file mode 100644
index 00000000000000..1c69d63a400219
--- /dev/null
+++ b/api/core/model_runtime/model_providers/siliconflow/llm/meta-mlama-3.1-70b-instruct.yaml
@@ -0,0 +1,30 @@
+model: meta-llama/Meta-Llama-3.1-70B-Instruct
+label:
+ en_US: meta-llama/Meta-Llama-3.1-70B-Instruct
+model_type: llm
+features:
+ - agent-thought
+model_properties:
+ mode: chat
+ context_size: 32768
+parameter_rules:
+ - name: temperature
+ use_template: temperature
+ - name: max_tokens
+ use_template: max_tokens
+ type: int
+ default: 512
+ min: 1
+ max: 4096
+ help:
+ zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。
+ en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter.
+ - name: top_p
+ use_template: top_p
+ - name: frequency_penalty
+ use_template: frequency_penalty
+pricing:
+ input: '4.13'
+ output: '4.13'
+ unit: '0.000001'
+ currency: RMB
diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/meta-mlama-3.1-8b-instruct.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/meta-mlama-3.1-8b-instruct.yaml
new file mode 100644
index 00000000000000..a97002a5ca3658
--- /dev/null
+++ b/api/core/model_runtime/model_providers/siliconflow/llm/meta-mlama-3.1-8b-instruct.yaml
@@ -0,0 +1,30 @@
+model: meta-llama/Meta-Llama-3.1-8B-Instruct
+label:
+ en_US: meta-llama/Meta-Llama-3.1-8B-Instruct
+model_type: llm
+features:
+ - agent-thought
+model_properties:
+ mode: chat
+ context_size: 8192
+parameter_rules:
+ - name: temperature
+ use_template: temperature
+ - name: max_tokens
+ use_template: max_tokens
+ type: int
+ default: 512
+ min: 1
+ max: 4096
+ help:
+ zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。
+ en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter.
+ - name: top_p
+ use_template: top_p
+ - name: frequency_penalty
+ use_template: frequency_penalty
+pricing:
+ input: '0'
+ output: '0'
+ unit: '0.000001'
+ currency: RMB
diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/mistral-7b-instruct-v0.2.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/mistral-7b-instruct-v0.2.yaml
new file mode 100644
index 00000000000000..27664eab6c817a
--- /dev/null
+++ b/api/core/model_runtime/model_providers/siliconflow/llm/mistral-7b-instruct-v0.2.yaml
@@ -0,0 +1,30 @@
+model: mistralai/Mistral-7B-Instruct-v0.2
+label:
+ en_US: mistralai/Mistral-7B-Instruct-v0.2
+model_type: llm
+features:
+ - agent-thought
+model_properties:
+ mode: chat
+ context_size: 32768
+parameter_rules:
+ - name: temperature
+ use_template: temperature
+ - name: max_tokens
+ use_template: max_tokens
+ type: int
+ default: 512
+ min: 1
+ max: 4096
+ help:
+ zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。
+ en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter.
+ - name: top_p
+ use_template: top_p
+ - name: frequency_penalty
+ use_template: frequency_penalty
+pricing:
+ input: '0'
+ output: '0'
+ unit: '0.000001'
+ currency: RMB
diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/mistral-8x7b-instruct-v0.1.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/mistral-8x7b-instruct-v0.1.yaml
new file mode 100644
index 00000000000000..fd7aada42848aa
--- /dev/null
+++ b/api/core/model_runtime/model_providers/siliconflow/llm/mistral-8x7b-instruct-v0.1.yaml
@@ -0,0 +1,30 @@
+model: mistralai/Mixtral-8x7B-Instruct-v0.1
+label:
+ en_US: mistralai/Mixtral-8x7B-Instruct-v0.1
+model_type: llm
+features:
+ - agent-thought
+model_properties:
+ mode: chat
+ context_size: 32768
+parameter_rules:
+ - name: temperature
+ use_template: temperature
+ - name: max_tokens
+ use_template: max_tokens
+ type: int
+ default: 512
+ min: 1
+ max: 4096
+ help:
+ zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。
+ en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter.
+ - name: top_p
+ use_template: top_p
+ - name: frequency_penalty
+ use_template: frequency_penalty
+pricing:
+ input: '1.26'
+ output: '1.26'
+ unit: '0.000001'
+ currency: RMB
diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/qwen2-1.5b-instruct.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/qwen2-1.5b-instruct.yaml
new file mode 100644
index 00000000000000..f6c976af8e7b6e
--- /dev/null
+++ b/api/core/model_runtime/model_providers/siliconflow/llm/qwen2-1.5b-instruct.yaml
@@ -0,0 +1,30 @@
+model: Qwen/Qwen2-1.5B-Instruct
+label:
+ en_US: Qwen/Qwen2-1.5B-Instruct
+model_type: llm
+features:
+ - agent-thought
+model_properties:
+ mode: chat
+ context_size: 32768
+parameter_rules:
+ - name: temperature
+ use_template: temperature
+ - name: max_tokens
+ use_template: max_tokens
+ type: int
+ default: 512
+ min: 1
+ max: 4096
+ help:
+ zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。
+ en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter.
+ - name: top_p
+ use_template: top_p
+ - name: frequency_penalty
+ use_template: frequency_penalty
+pricing:
+ input: '0'
+ output: '0'
+ unit: '0.000001'
+ currency: RMB
diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/qwen2-57b-a14b-instruct.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/qwen2-57b-a14b-instruct.yaml
index 39624dc5b97c3d..a996e919ea9f27 100644
--- a/api/core/model_runtime/model_providers/siliconflow/llm/qwen2-57b-a14b-instruct.yaml
+++ b/api/core/model_runtime/model_providers/siliconflow/llm/qwen2-57b-a14b-instruct.yaml
@@ -1,4 +1,4 @@
-model: alibaba/Qwen2-57B-A14B-Instruct
+model: Qwen/Qwen2-57B-A14B-Instruct
label:
en_US: Qwen/Qwen2-57B-A14B-Instruct
model_type: llm
diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/qwen2-72b-instruct.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/qwen2-72b-instruct.yaml
index fb7ff6cb14dbd7..a6e2c22dac87c0 100644
--- a/api/core/model_runtime/model_providers/siliconflow/llm/qwen2-72b-instruct.yaml
+++ b/api/core/model_runtime/model_providers/siliconflow/llm/qwen2-72b-instruct.yaml
@@ -1,4 +1,4 @@
-model: alibaba/Qwen2-72B-Instruct
+model: Qwen/Qwen2-72B-Instruct
label:
en_US: Qwen/Qwen2-72B-Instruct
model_type: llm
diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/qwen2-7b-instruct.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/qwen2-7b-instruct.yaml
index efda4abbd9965e..d8bea5e12927e7 100644
--- a/api/core/model_runtime/model_providers/siliconflow/llm/qwen2-7b-instruct.yaml
+++ b/api/core/model_runtime/model_providers/siliconflow/llm/qwen2-7b-instruct.yaml
@@ -1,4 +1,4 @@
-model: alibaba/Qwen2-7B-Instruct
+model: Qwen/Qwen2-7B-Instruct
label:
en_US: Qwen/Qwen2-7B-Instruct
model_type: llm
@@ -24,7 +24,7 @@ parameter_rules:
- name: frequency_penalty
use_template: frequency_penalty
pricing:
- input: '0.35'
- output: '0.35'
+ input: '0'
+ output: '0'
unit: '0.000001'
currency: RMB
diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/yi-1.5-6b-chat.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/yi-1.5-6b-chat.yaml
index 38cd4197d4c3dd..fe4c8b4b3e0350 100644
--- a/api/core/model_runtime/model_providers/siliconflow/llm/yi-1.5-6b-chat.yaml
+++ b/api/core/model_runtime/model_providers/siliconflow/llm/yi-1.5-6b-chat.yaml
@@ -24,7 +24,7 @@ parameter_rules:
- name: frequency_penalty
use_template: frequency_penalty
pricing:
- input: '0.35'
- output: '0.35'
+ input: '0'
+ output: '0'
unit: '0.000001'
currency: RMB
diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/yi-1.5-9b-chat.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/yi-1.5-9b-chat.yaml
index 042eeea81a48f0..c61f0dc53fe6ec 100644
--- a/api/core/model_runtime/model_providers/siliconflow/llm/yi-1.5-9b-chat.yaml
+++ b/api/core/model_runtime/model_providers/siliconflow/llm/yi-1.5-9b-chat.yaml
@@ -1,4 +1,4 @@
-model: 01-ai/Yi-1.5-9B-Chat
+model: 01-ai/Yi-1.5-9B-Chat-16K
label:
en_US: 01-ai/Yi-1.5-9B-Chat-16K
model_type: llm
@@ -24,7 +24,7 @@ parameter_rules:
- name: frequency_penalty
use_template: frequency_penalty
pricing:
- input: '0.42'
- output: '0.42'
+ input: '0'
+ output: '0'
unit: '0.000001'
currency: RMB
diff --git a/api/core/model_runtime/model_providers/siliconflow/siliconflow.py b/api/core/model_runtime/model_providers/siliconflow/siliconflow.py
index a53f16c929728e..dd0eea362a5f83 100644
--- a/api/core/model_runtime/model_providers/siliconflow/siliconflow.py
+++ b/api/core/model_runtime/model_providers/siliconflow/siliconflow.py
@@ -6,6 +6,7 @@
logger = logging.getLogger(__name__)
+
class SiliconflowProvider(ModelProvider):
def validate_provider_credentials(self, credentials: dict) -> None:
diff --git a/api/core/model_runtime/model_providers/siliconflow/siliconflow.yaml b/api/core/model_runtime/model_providers/siliconflow/siliconflow.yaml
index cf44c185d59a21..1ebb1e6d8b149c 100644
--- a/api/core/model_runtime/model_providers/siliconflow/siliconflow.yaml
+++ b/api/core/model_runtime/model_providers/siliconflow/siliconflow.yaml
@@ -15,6 +15,8 @@ help:
en_US: https://cloud.siliconflow.cn/keys
supported_model_types:
- llm
+ - text-embedding
+ - speech2text
configurate_methods:
- predefined-model
provider_credential_schema:
diff --git a/api/core/model_runtime/model_providers/siliconflow/speech2text/__init__.py b/api/core/model_runtime/model_providers/siliconflow/speech2text/__init__.py
new file mode 100644
index 00000000000000..e69de29bb2d1d6
diff --git a/api/core/model_runtime/model_providers/siliconflow/speech2text/sense-voice-small.yaml b/api/core/model_runtime/model_providers/siliconflow/speech2text/sense-voice-small.yaml
new file mode 100644
index 00000000000000..deceaf60f4f017
--- /dev/null
+++ b/api/core/model_runtime/model_providers/siliconflow/speech2text/sense-voice-small.yaml
@@ -0,0 +1,5 @@
+model: iic/SenseVoiceSmall
+model_type: speech2text
+model_properties:
+ file_upload_limit: 1
+ supported_file_extensions: mp3,wav
diff --git a/api/core/model_runtime/model_providers/siliconflow/speech2text/speech2text.py b/api/core/model_runtime/model_providers/siliconflow/speech2text/speech2text.py
new file mode 100644
index 00000000000000..6ad3cab5873c69
--- /dev/null
+++ b/api/core/model_runtime/model_providers/siliconflow/speech2text/speech2text.py
@@ -0,0 +1,32 @@
+from typing import IO, Optional
+
+from core.model_runtime.model_providers.openai_api_compatible.speech2text.speech2text import OAICompatSpeech2TextModel
+
+
+class SiliconflowSpeech2TextModel(OAICompatSpeech2TextModel):
+ """
+ Model class for Siliconflow Speech to text model.
+ """
+
+ def _invoke(
+ self, model: str, credentials: dict, file: IO[bytes], user: Optional[str] = None
+ ) -> str:
+ """
+ Invoke speech2text model
+
+ :param model: model name
+ :param credentials: model credentials
+ :param file: audio file
+ :param user: unique user id
+ :return: text for given audio file
+ """
+ self._add_custom_parameters(credentials)
+        return super()._invoke(model, credentials, file, user)
+
+ def validate_credentials(self, model: str, credentials: dict) -> None:
+ self._add_custom_parameters(credentials)
+ return super().validate_credentials(model, credentials)
+
+ @classmethod
+ def _add_custom_parameters(cls, credentials: dict) -> None:
+ credentials["endpoint_url"] = "https://api.siliconflow.cn/v1"
diff --git a/api/core/model_runtime/model_providers/siliconflow/text_embedding/bce-embedding-base-v1.yaml b/api/core/model_runtime/model_providers/siliconflow/text_embedding/bce-embedding-base-v1.yaml
new file mode 100644
index 00000000000000..710fbc04f6ad12
--- /dev/null
+++ b/api/core/model_runtime/model_providers/siliconflow/text_embedding/bce-embedding-base-v1.yaml
@@ -0,0 +1,5 @@
+model: netease-youdao/bce-embedding-base_v1
+model_type: text-embedding
+model_properties:
+ context_size: 512
+ max_chunks: 1
diff --git a/api/core/model_runtime/model_providers/siliconflow/text_embedding/bge-large-en-v1.5.yaml b/api/core/model_runtime/model_providers/siliconflow/text_embedding/bge-large-en-v1.5.yaml
new file mode 100644
index 00000000000000..84f69b41a08c13
--- /dev/null
+++ b/api/core/model_runtime/model_providers/siliconflow/text_embedding/bge-large-en-v1.5.yaml
@@ -0,0 +1,5 @@
+model: BAAI/bge-large-en-v1.5
+model_type: text-embedding
+model_properties:
+ context_size: 512
+ max_chunks: 1
diff --git a/api/core/model_runtime/model_providers/siliconflow/text_embedding/bge-large-zh-v1.5.yaml b/api/core/model_runtime/model_providers/siliconflow/text_embedding/bge-large-zh-v1.5.yaml
new file mode 100644
index 00000000000000..5248375d0b507e
--- /dev/null
+++ b/api/core/model_runtime/model_providers/siliconflow/text_embedding/bge-large-zh-v1.5.yaml
@@ -0,0 +1,5 @@
+model: BAAI/bge-large-zh-v1.5
+model_type: text-embedding
+model_properties:
+ context_size: 512
+ max_chunks: 1
diff --git a/api/core/model_runtime/model_providers/siliconflow/text_embedding/bge-m3.yaml b/api/core/model_runtime/model_providers/siliconflow/text_embedding/bge-m3.yaml
new file mode 100644
index 00000000000000..f0b12dd420ab2b
--- /dev/null
+++ b/api/core/model_runtime/model_providers/siliconflow/text_embedding/bge-m3.yaml
@@ -0,0 +1,5 @@
+model: BAAI/bge-m3
+model_type: text-embedding
+model_properties:
+ context_size: 8192
+ max_chunks: 1
diff --git a/api/core/model_runtime/model_providers/siliconflow/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/siliconflow/text_embedding/text_embedding.py
new file mode 100644
index 00000000000000..c58765cecb9a69
--- /dev/null
+++ b/api/core/model_runtime/model_providers/siliconflow/text_embedding/text_embedding.py
@@ -0,0 +1,29 @@
+from typing import Optional
+
+from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
+from core.model_runtime.model_providers.openai_api_compatible.text_embedding.text_embedding import (
+ OAICompatEmbeddingModel,
+)
+
+
+class SiliconflowTextEmbeddingModel(OAICompatEmbeddingModel):
+ """
+ Model class for Siliconflow text embedding model.
+ """
+ def validate_credentials(self, model: str, credentials: dict) -> None:
+ self._add_custom_parameters(credentials)
+ super().validate_credentials(model, credentials)
+
+ def _invoke(self, model: str, credentials: dict,
+ texts: list[str], user: Optional[str] = None) \
+ -> TextEmbeddingResult:
+ self._add_custom_parameters(credentials)
+ return super()._invoke(model, credentials, texts, user)
+
+ def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int:
+ self._add_custom_parameters(credentials)
+ return super().get_num_tokens(model, credentials, texts)
+
+ @classmethod
+ def _add_custom_parameters(cls, credentials: dict) -> None:
+ credentials['endpoint_url'] = 'https://api.siliconflow.cn/v1'
\ No newline at end of file
diff --git a/api/core/model_runtime/model_providers/stepfun/llm/_position.yaml b/api/core/model_runtime/model_providers/stepfun/llm/_position.yaml
index b34433e1d4d150..2bb0c703f4aa39 100644
--- a/api/core/model_runtime/model_providers/stepfun/llm/_position.yaml
+++ b/api/core/model_runtime/model_providers/stepfun/llm/_position.yaml
@@ -2,5 +2,7 @@
- step-1-32k
- step-1-128k
- step-1-256k
+- step-1-flash
+- step-2-16k
- step-1v-8k
- step-1v-32k
diff --git a/api/core/model_runtime/model_providers/stepfun/llm/step-1-flash.yaml b/api/core/model_runtime/model_providers/stepfun/llm/step-1-flash.yaml
new file mode 100644
index 00000000000000..afb880f2a40bbc
--- /dev/null
+++ b/api/core/model_runtime/model_providers/stepfun/llm/step-1-flash.yaml
@@ -0,0 +1,25 @@
+model: step-1-flash
+label:
+ zh_Hans: step-1-flash
+ en_US: step-1-flash
+model_type: llm
+features:
+ - agent-thought
+model_properties:
+ mode: chat
+ context_size: 8000
+parameter_rules:
+ - name: temperature
+ use_template: temperature
+ - name: top_p
+ use_template: top_p
+ - name: max_tokens
+ use_template: max_tokens
+ default: 512
+ min: 1
+ max: 8000
+pricing:
+ input: '0.001'
+ output: '0.004'
+ unit: '0.001'
+ currency: RMB
diff --git a/api/core/model_runtime/model_providers/stepfun/llm/step-1v-32k.yaml b/api/core/model_runtime/model_providers/stepfun/llm/step-1v-32k.yaml
index f878ee3e56e1f9..08d6ad245d2dd6 100644
--- a/api/core/model_runtime/model_providers/stepfun/llm/step-1v-32k.yaml
+++ b/api/core/model_runtime/model_providers/stepfun/llm/step-1v-32k.yaml
@@ -5,6 +5,9 @@ label:
model_type: llm
features:
- vision
+ - tool-call
+ - multi-tool-call
+ - stream-tool-call
model_properties:
mode: chat
context_size: 32000
diff --git a/api/core/model_runtime/model_providers/stepfun/llm/step-1v-8k.yaml b/api/core/model_runtime/model_providers/stepfun/llm/step-1v-8k.yaml
index 6c3cb61d2c6621..843d14d9c67e80 100644
--- a/api/core/model_runtime/model_providers/stepfun/llm/step-1v-8k.yaml
+++ b/api/core/model_runtime/model_providers/stepfun/llm/step-1v-8k.yaml
@@ -5,6 +5,9 @@ label:
model_type: llm
features:
- vision
+ - tool-call
+ - multi-tool-call
+ - stream-tool-call
model_properties:
mode: chat
context_size: 8192
diff --git a/api/core/model_runtime/model_providers/stepfun/llm/step-2-16k.yaml b/api/core/model_runtime/model_providers/stepfun/llm/step-2-16k.yaml
new file mode 100644
index 00000000000000..6f2dabbfb0e308
--- /dev/null
+++ b/api/core/model_runtime/model_providers/stepfun/llm/step-2-16k.yaml
@@ -0,0 +1,28 @@
+model: step-2-16k
+label:
+ zh_Hans: step-2-16k
+ en_US: step-2-16k
+model_type: llm
+features:
+ - agent-thought
+ - tool-call
+ - multi-tool-call
+ - stream-tool-call
+model_properties:
+ mode: chat
+ context_size: 16000
+parameter_rules:
+ - name: temperature
+ use_template: temperature
+ - name: top_p
+ use_template: top_p
+ - name: max_tokens
+ use_template: max_tokens
+ default: 1024
+ min: 1
+ max: 16000
+pricing:
+ input: '0.038'
+ output: '0.120'
+ unit: '0.001'
+ currency: RMB
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/farui-plus.yaml b/api/core/model_runtime/model_providers/tongyi/llm/farui-plus.yaml
new file mode 100644
index 00000000000000..aad07f56736e52
--- /dev/null
+++ b/api/core/model_runtime/model_providers/tongyi/llm/farui-plus.yaml
@@ -0,0 +1,81 @@
+model: farui-plus
+label:
+ en_US: farui-plus
+model_type: llm
+features:
+ - multi-tool-call
+ - agent-thought
+ - stream-tool-call
+model_properties:
+ mode: chat
+ context_size: 12288
+parameter_rules:
+ - name: temperature
+ use_template: temperature
+ type: float
+ default: 0.3
+ min: 0.0
+ max: 2.0
+ help:
+ zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。
+      en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected, and the generated results will be more certain.
+ - name: max_tokens
+ use_template: max_tokens
+ type: int
+ default: 2000
+ min: 1
+ max: 2000
+ help:
+ zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。
+ en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time.
+ - name: top_p
+ use_template: top_p
+ type: float
+ default: 0.8
+ min: 0.1
+ max: 0.9
+ help:
+ zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。
+ en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated.
+ - name: top_k
+ type: int
+ min: 0
+ max: 99
+ label:
+ zh_Hans: 取样数量
+ en_US: Top k
+ help:
+ zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。
+ en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated.
+ - name: seed
+ required: false
+ type: int
+ default: 1234
+ label:
+ zh_Hans: 随机种子
+ en_US: Random seed
+ help:
+ zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。
+ en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time.
+ - name: repetition_penalty
+ required: false
+ type: float
+ default: 1.1
+ label:
+ en_US: Repetition penalty
+ help:
+ zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
+      en_US: Used to control the degree of repetition in model generation. Increasing repetition_penalty reduces the duplication in generated output. 1.0 means no penalty.
+ - name: enable_search
+ type: boolean
+ default: false
+ help:
+ zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
+ en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
+ - name: response_format
+ use_template: response_format
+pricing:
+ input: '0.02'
+ output: '0.02'
+ unit: '0.001'
+ currency: RMB
diff --git a/api/core/model_runtime/model_providers/tongyi/text_embedding/text-embedding-v1.yaml b/api/core/model_runtime/model_providers/tongyi/text_embedding/text-embedding-v1.yaml
index eed09f95dedea7..f4303c53d38b80 100644
--- a/api/core/model_runtime/model_providers/tongyi/text_embedding/text-embedding-v1.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/text_embedding/text-embedding-v1.yaml
@@ -2,3 +2,8 @@ model: text-embedding-v1
model_type: text-embedding
model_properties:
context_size: 2048
+ max_chunks: 25
+pricing:
+ input: "0.0007"
+ unit: "0.001"
+ currency: RMB
diff --git a/api/core/model_runtime/model_providers/tongyi/text_embedding/text-embedding-v2.yaml b/api/core/model_runtime/model_providers/tongyi/text_embedding/text-embedding-v2.yaml
index db2fa861e69f90..f6be3544ed8f65 100644
--- a/api/core/model_runtime/model_providers/tongyi/text_embedding/text-embedding-v2.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/text_embedding/text-embedding-v2.yaml
@@ -2,3 +2,8 @@ model: text-embedding-v2
model_type: text-embedding
model_properties:
context_size: 2048
+ max_chunks: 25
+pricing:
+ input: "0.0007"
+ unit: "0.001"
+ currency: RMB
diff --git a/api/core/model_runtime/model_providers/tongyi/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/tongyi/text_embedding/text_embedding.py
index c207ffc1e34bbb..e7e1b5c764c093 100644
--- a/api/core/model_runtime/model_providers/tongyi/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/tongyi/text_embedding/text_embedding.py
@@ -2,6 +2,7 @@
from typing import Optional
import dashscope
+import numpy as np
from core.model_runtime.entities.model_entities import PriceType
from core.model_runtime.entities.text_embedding_entities import (
@@ -21,11 +22,11 @@ class TongyiTextEmbeddingModel(_CommonTongyi, TextEmbeddingModel):
"""
def _invoke(
- self,
- model: str,
- credentials: dict,
- texts: list[str],
- user: Optional[str] = None,
+ self,
+ model: str,
+ credentials: dict,
+ texts: list[str],
+ user: Optional[str] = None,
) -> TextEmbeddingResult:
"""
Invoke text embedding model
@@ -37,16 +38,44 @@ def _invoke(
:return: embeddings result
"""
credentials_kwargs = self._to_credential_kwargs(credentials)
- embeddings, embedding_used_tokens = self.embed_documents(
- credentials_kwargs=credentials_kwargs,
- model=model,
- texts=texts
- )
+ context_size = self._get_context_size(model, credentials)
+ max_chunks = self._get_max_chunks(model, credentials)
+ inputs = []
+ indices = []
+ used_tokens = 0
+
+ for i, text in enumerate(texts):
+
+ # Here token count is only an approximation based on the GPT2 tokenizer
+ num_tokens = self._get_num_tokens_by_gpt2(text)
+
+ if num_tokens >= context_size:
+ cutoff = int(np.floor(len(text) * (context_size / num_tokens)))
+ # if num tokens is larger than context length, only use the start
+ inputs.append(text[0:cutoff])
+ else:
+ inputs.append(text)
+ indices += [i]
+
+ batched_embeddings = []
+ _iter = range(0, len(inputs), max_chunks)
+
+ for i in _iter:
+ embeddings_batch, embedding_used_tokens = self.embed_documents(
+ credentials_kwargs=credentials_kwargs,
+ model=model,
+ texts=inputs[i : i + max_chunks],
+ )
+ used_tokens += embedding_used_tokens
+ batched_embeddings += embeddings_batch
+
+ # calc usage
+ usage = self._calc_response_usage(
+ model=model, credentials=credentials, tokens=used_tokens
+ )
return TextEmbeddingResult(
- embeddings=embeddings,
- usage=self._calc_response_usage(model, credentials_kwargs, embedding_used_tokens),
- model=model
+ embeddings=batched_embeddings, usage=usage, model=model
)
def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int:
@@ -79,12 +108,16 @@ def validate_credentials(self, model: str, credentials: dict) -> None:
credentials_kwargs = self._to_credential_kwargs(credentials)
# call embedding model
- self.embed_documents(credentials_kwargs=credentials_kwargs, model=model, texts=["ping"])
+ self.embed_documents(
+ credentials_kwargs=credentials_kwargs, model=model, texts=["ping"]
+ )
except Exception as ex:
raise CredentialsValidateFailedError(str(ex))
@staticmethod
- def embed_documents(credentials_kwargs: dict, model: str, texts: list[str]) -> tuple[list[list[float]], int]:
+ def embed_documents(
+ credentials_kwargs: dict, model: str, texts: list[str]
+ ) -> tuple[list[list[float]], int]:
"""Call out to Tongyi's embedding endpoint.
Args:
@@ -102,7 +135,7 @@ def embed_documents(credentials_kwargs: dict, model: str, texts: list[str]) -> t
api_key=credentials_kwargs["dashscope_api_key"],
model=model,
input=text,
- text_type="document"
+ text_type="document",
)
data = response.output["embeddings"][0]
embeddings.append(data["embedding"])
@@ -111,7 +144,7 @@ def embed_documents(credentials_kwargs: dict, model: str, texts: list[str]) -> t
return [list(map(float, e)) for e in embeddings], embedding_used_tokens
def _calc_response_usage(
- self, model: str, credentials: dict, tokens: int
+ self, model: str, credentials: dict, tokens: int
) -> EmbeddingUsage:
"""
Calculate response usage
@@ -125,7 +158,7 @@ def _calc_response_usage(
model=model,
credentials=credentials,
price_type=PriceType.INPUT,
- tokens=tokens
+ tokens=tokens,
)
# transform usage
@@ -136,7 +169,7 @@ def _calc_response_usage(
price_unit=input_price_info.unit,
total_price=input_price_info.total_amount,
currency=input_price_info.currency,
- latency=time.perf_counter() - self.started_at
+ latency=time.perf_counter() - self.started_at,
)
return usage
diff --git a/api/core/model_runtime/model_providers/upstage/llm/_position.yaml b/api/core/model_runtime/model_providers/upstage/llm/_position.yaml
index d4f03e1988f8b8..7992843dcb1d1d 100644
--- a/api/core/model_runtime/model_providers/upstage/llm/_position.yaml
+++ b/api/core/model_runtime/model_providers/upstage/llm/_position.yaml
@@ -1 +1 @@
-- soloar-1-mini-chat
+- solar-1-mini-chat
diff --git a/api/core/model_runtime/model_providers/wenxin/llm/ernie_bot.py b/api/core/model_runtime/model_providers/wenxin/llm/ernie_bot.py
index bc7f29cf6ea538..e345663d36efcb 100644
--- a/api/core/model_runtime/model_providers/wenxin/llm/ernie_bot.py
+++ b/api/core/model_runtime/model_providers/wenxin/llm/ernie_bot.py
@@ -140,8 +140,9 @@ class ErnieBotModel:
'ernie-lite-8k-0308': 'https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-lite-8k',
'ernie-character-8k': 'https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-char-8k',
'ernie-character-8k-0321': 'https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-char-8k',
- 'ernie-4.0-tutbo-8k': 'https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-4.0-turbo-8k',
- 'ernie-4.0-tutbo-8k-preview': 'https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-4.0-turbo-8k-preview',
+ 'ernie-4.0-turbo-8k': 'https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-4.0-turbo-8k',
+ 'ernie-4.0-turbo-8k-preview': 'https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-4.0-turbo-8k-preview',
+ 'yi_34b_chat': 'https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/yi_34b_chat',
}
function_calling_supports = [
@@ -154,7 +155,8 @@ class ErnieBotModel:
'ernie-3.5-128k',
'ernie-4.0-8k',
'ernie-4.0-turbo-8k',
- 'ernie-4.0-turbo-8k-preview'
+ 'ernie-4.0-turbo-8k-preview',
+ 'yi_34b_chat'
]
api_key: str = ''
diff --git a/api/core/model_runtime/model_providers/wenxin/llm/yi_34b_chat.yaml b/api/core/model_runtime/model_providers/wenxin/llm/yi_34b_chat.yaml
new file mode 100644
index 00000000000000..0b247fbd223dac
--- /dev/null
+++ b/api/core/model_runtime/model_providers/wenxin/llm/yi_34b_chat.yaml
@@ -0,0 +1,30 @@
+model: yi_34b_chat
+label:
+ en_US: yi_34b_chat
+model_type: llm
+features:
+ - agent-thought
+model_properties:
+ mode: chat
+ context_size: 32000
+parameter_rules:
+ - name: temperature
+ use_template: temperature
+ min: 0.1
+ max: 1.0
+ default: 0.95
+ - name: top_p
+ use_template: top_p
+ min: 0
+ max: 1.0
+ default: 0.7
+ - name: max_tokens
+ use_template: max_tokens
+ default: 4096
+ min: 2
+ max: 4096
+ - name: presence_penalty
+ use_template: presence_penalty
+ default: 1.0
+ min: 1.0
+ max: 2.0
diff --git a/api/core/model_runtime/model_providers/zhinao/__init__.py b/api/core/model_runtime/model_providers/zhinao/__init__.py
new file mode 100644
index 00000000000000..e69de29bb2d1d6
diff --git a/api/core/model_runtime/model_providers/zhinao/_assets/icon_l_en.svg b/api/core/model_runtime/model_providers/zhinao/_assets/icon_l_en.svg
new file mode 100644
index 00000000000000..b22b8694419bc7
--- /dev/null
+++ b/api/core/model_runtime/model_providers/zhinao/_assets/icon_l_en.svg
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
diff --git a/api/core/model_runtime/model_providers/zhinao/_assets/icon_s_en.svg b/api/core/model_runtime/model_providers/zhinao/_assets/icon_s_en.svg
new file mode 100644
index 00000000000000..8fe72b7d0928e6
--- /dev/null
+++ b/api/core/model_runtime/model_providers/zhinao/_assets/icon_s_en.svg
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
diff --git a/api/core/model_runtime/model_providers/zhinao/llm/360gpt-turbo-responsibility-8k.yaml b/api/core/model_runtime/model_providers/zhinao/llm/360gpt-turbo-responsibility-8k.yaml
new file mode 100644
index 00000000000000..f420df0001b3a2
--- /dev/null
+++ b/api/core/model_runtime/model_providers/zhinao/llm/360gpt-turbo-responsibility-8k.yaml
@@ -0,0 +1,36 @@
+model: 360gpt-turbo-responsibility-8k
+label:
+ zh_Hans: 360gpt-turbo-responsibility-8k
+ en_US: 360gpt-turbo-responsibility-8k
+model_type: llm
+features:
+ - agent-thought
+model_properties:
+ mode: chat
+ context_size: 8192
+parameter_rules:
+ - name: temperature
+ use_template: temperature
+ min: 0
+ max: 1
+ default: 0.5
+ - name: top_p
+ use_template: top_p
+ min: 0
+ max: 1
+ default: 1
+ - name: max_tokens
+ use_template: max_tokens
+ min: 1
+ max: 8192
+ default: 1024
+ - name: frequency_penalty
+ use_template: frequency_penalty
+ min: -2
+ max: 2
+ default: 0
+ - name: presence_penalty
+ use_template: presence_penalty
+ min: -2
+ max: 2
+ default: 0
diff --git a/api/core/model_runtime/model_providers/zhinao/llm/360gpt-turbo.yaml b/api/core/model_runtime/model_providers/zhinao/llm/360gpt-turbo.yaml
new file mode 100644
index 00000000000000..a2658fbe4f5c0e
--- /dev/null
+++ b/api/core/model_runtime/model_providers/zhinao/llm/360gpt-turbo.yaml
@@ -0,0 +1,36 @@
+model: 360gpt-turbo
+label:
+ zh_Hans: 360gpt-turbo
+ en_US: 360gpt-turbo
+model_type: llm
+features:
+ - agent-thought
+model_properties:
+ mode: chat
+ context_size: 2048
+parameter_rules:
+ - name: temperature
+ use_template: temperature
+ min: 0
+ max: 1
+ default: 0.5
+ - name: top_p
+ use_template: top_p
+ min: 0
+ max: 1
+ default: 1
+ - name: max_tokens
+ use_template: max_tokens
+ min: 1
+ max: 2048
+ default: 1024
+ - name: frequency_penalty
+ use_template: frequency_penalty
+ min: -2
+ max: 2
+ default: 0
+ - name: presence_penalty
+ use_template: presence_penalty
+ min: -2
+ max: 2
+ default: 0
diff --git a/api/core/model_runtime/model_providers/zhinao/llm/360gpt2-pro.yaml b/api/core/model_runtime/model_providers/zhinao/llm/360gpt2-pro.yaml
new file mode 100644
index 00000000000000..00c81eb1daaffb
--- /dev/null
+++ b/api/core/model_runtime/model_providers/zhinao/llm/360gpt2-pro.yaml
@@ -0,0 +1,36 @@
+model: 360gpt2-pro
+label:
+ zh_Hans: 360gpt2-pro
+ en_US: 360gpt2-pro
+model_type: llm
+features:
+ - agent-thought
+model_properties:
+ mode: chat
+ context_size: 2048
+parameter_rules:
+ - name: temperature
+ use_template: temperature
+ min: 0
+ max: 1
+ default: 0.5
+ - name: top_p
+ use_template: top_p
+ min: 0
+ max: 1
+ default: 1
+ - name: max_tokens
+ use_template: max_tokens
+ min: 1
+ max: 2048
+ default: 1024
+ - name: frequency_penalty
+ use_template: frequency_penalty
+ min: -2
+ max: 2
+ default: 0
+ - name: presence_penalty
+ use_template: presence_penalty
+ min: -2
+ max: 2
+ default: 0
diff --git a/api/core/model_runtime/model_providers/zhinao/llm/__init__.py b/api/core/model_runtime/model_providers/zhinao/llm/__init__.py
new file mode 100644
index 00000000000000..e69de29bb2d1d6
diff --git a/api/core/model_runtime/model_providers/zhinao/llm/_position.yaml b/api/core/model_runtime/model_providers/zhinao/llm/_position.yaml
new file mode 100644
index 00000000000000..ab8dbf51821c0a
--- /dev/null
+++ b/api/core/model_runtime/model_providers/zhinao/llm/_position.yaml
@@ -0,0 +1,3 @@
+- 360gpt2-pro
+- 360gpt-turbo
+- 360gpt-turbo-responsibility-8k
diff --git a/api/core/model_runtime/model_providers/zhinao/llm/llm.py b/api/core/model_runtime/model_providers/zhinao/llm/llm.py
new file mode 100644
index 00000000000000..6930a5ed0134b0
--- /dev/null
+++ b/api/core/model_runtime/model_providers/zhinao/llm/llm.py
@@ -0,0 +1,25 @@
+from collections.abc import Generator
+from typing import Optional, Union
+
+from core.model_runtime.entities.llm_entities import LLMResult
+from core.model_runtime.entities.message_entities import PromptMessage, PromptMessageTool
+from core.model_runtime.model_providers.openai_api_compatible.llm.llm import OAIAPICompatLargeLanguageModel
+
+
+class ZhinaoLargeLanguageModel(OAIAPICompatLargeLanguageModel):
+ def _invoke(self, model: str, credentials: dict,
+ prompt_messages: list[PromptMessage], model_parameters: dict,
+ tools: Optional[list[PromptMessageTool]] = None, stop: Optional[list[str]] = None,
+ stream: bool = True, user: Optional[str] = None) \
+ -> Union[LLMResult, Generator]:
+ self._add_custom_parameters(credentials)
+        return super()._invoke(model, credentials, prompt_messages, model_parameters, tools, stop, stream, user)
+
+ def validate_credentials(self, model: str, credentials: dict) -> None:
+ self._add_custom_parameters(credentials)
+ super().validate_credentials(model, credentials)
+
+ @classmethod
+ def _add_custom_parameters(cls, credentials: dict) -> None:
+ credentials['mode'] = 'chat'
+ credentials['endpoint_url'] = 'https://api.360.cn/v1'
diff --git a/api/core/model_runtime/model_providers/zhinao/zhinao.py b/api/core/model_runtime/model_providers/zhinao/zhinao.py
new file mode 100644
index 00000000000000..44b36c9f51edd7
--- /dev/null
+++ b/api/core/model_runtime/model_providers/zhinao/zhinao.py
@@ -0,0 +1,32 @@
+import logging
+
+from core.model_runtime.entities.model_entities import ModelType
+from core.model_runtime.errors.validate import CredentialsValidateFailedError
+from core.model_runtime.model_providers.__base.model_provider import ModelProvider
+
+logger = logging.getLogger(__name__)
+
+
+class ZhinaoProvider(ModelProvider):
+
+ def validate_provider_credentials(self, credentials: dict) -> None:
+ """
+ Validate provider credentials
+ if validate failed, raise exception
+
+ :param credentials: provider credentials, credentials form defined in `provider_credential_schema`.
+ """
+ try:
+ model_instance = self.get_model_instance(ModelType.LLM)
+
+ # Use `360gpt-turbo` model for validate,
+ # no matter what model you pass in, text completion model or chat model
+ model_instance.validate_credentials(
+ model='360gpt-turbo',
+ credentials=credentials
+ )
+ except CredentialsValidateFailedError as ex:
+ raise ex
+ except Exception as ex:
+ logger.exception(f'{self.get_provider_schema().provider} credentials validate failed')
+ raise ex
diff --git a/api/core/model_runtime/model_providers/zhinao/zhinao.yaml b/api/core/model_runtime/model_providers/zhinao/zhinao.yaml
new file mode 100644
index 00000000000000..c5cb142c47d4c3
--- /dev/null
+++ b/api/core/model_runtime/model_providers/zhinao/zhinao.yaml
@@ -0,0 +1,32 @@
+provider: zhinao
+label:
+ en_US: 360 AI
+ zh_Hans: 360 智脑
+description:
+ en_US: Models provided by 360 AI.
+ zh_Hans: 360 智脑提供的模型。
+icon_small:
+ en_US: icon_s_en.svg
+icon_large:
+ en_US: icon_l_en.svg
+background: "#e3f0ff"
+help:
+ title:
+ en_US: Get your API Key from 360 AI.
+ zh_Hans: 从360 智脑获取 API Key
+ url:
+ en_US: https://ai.360.com/platform/keys
+supported_model_types:
+ - llm
+configurate_methods:
+ - predefined-model
+provider_credential_schema:
+ credential_form_schemas:
+ - variable: api_key
+ label:
+ en_US: API Key
+ type: secret-input
+ required: true
+ placeholder:
+ zh_Hans: 在此输入您的 API Key
+ en_US: Enter your API Key
diff --git a/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-0520.yaml b/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-0520.yaml
index 3968e8f26822ab..8391278e4f1ea3 100644
--- a/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-0520.yaml
+++ b/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-0520.yaml
@@ -37,3 +37,8 @@ parameter_rules:
default: 1024
min: 1
max: 8192
+pricing:
+ input: '0.1'
+ output: '0.1'
+ unit: '0.001'
+ currency: RMB
diff --git a/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-air.yaml b/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-air.yaml
index ae2d5e5d533d1a..7caebd3e4b6aa8 100644
--- a/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-air.yaml
+++ b/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-air.yaml
@@ -37,3 +37,8 @@ parameter_rules:
default: 1024
min: 1
max: 8192
+pricing:
+ input: '0.001'
+ output: '0.001'
+ unit: '0.001'
+ currency: RMB
diff --git a/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-airx.yaml b/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-airx.yaml
index c0038a1ab223df..dc123913deb8b5 100644
--- a/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-airx.yaml
+++ b/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-airx.yaml
@@ -37,3 +37,8 @@ parameter_rules:
default: 1024
min: 1
max: 8192
+pricing:
+ input: '0.01'
+ output: '0.01'
+ unit: '0.001'
+ currency: RMB
diff --git a/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-flash.yaml b/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-flash.yaml
index 650f9faee6ea0d..1b1d499ba7383c 100644
--- a/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-flash.yaml
+++ b/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-flash.yaml
@@ -37,3 +37,8 @@ parameter_rules:
default: 1024
min: 1
max: 8192
+pricing:
+ input: '0.0001'
+ output: '0.0001'
+ unit: '0.001'
+ currency: RMB
diff --git a/api/core/model_runtime/model_providers/zhipuai/llm/glm_4_long.yaml b/api/core/model_runtime/model_providers/zhipuai/llm/glm_4_long.yaml
new file mode 100644
index 00000000000000..9d92e58f6cdff1
--- /dev/null
+++ b/api/core/model_runtime/model_providers/zhipuai/llm/glm_4_long.yaml
@@ -0,0 +1,33 @@
+model: glm-4-long
+label:
+ en_US: glm-4-long
+model_type: llm
+features:
+ - multi-tool-call
+ - agent-thought
+ - stream-tool-call
+model_properties:
+ mode: chat
+ context_size: 1024000
+parameter_rules:
+ - name: temperature
+ use_template: temperature
+ default: 0.95
+ min: 0.0
+ max: 1.0
+ help:
+ zh_Hans: 采样温度,控制输出的随机性,必须为正数取值范围是:(0.0,1.0],不能等于 0,默认值为 0.95 值越大,会使输出更随机,更具创造性;值越小,输出会更加稳定或确定建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。
+ en_US: Sampling temperature, controls the randomness of the output, must be a positive number. The value range is (0.0,1.0], which cannot be equal to 0. The default value is 0.95. The larger the value, the more random and creative the output will be; the smaller the value, The output will be more stable or certain. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time.
+ - name: top_p
+ use_template: top_p
+ default: 0.7
+ min: 0.0
+ max: 1.0
+ help:
+ zh_Hans: 用温度取样的另一种方法,称为核取样取值范围是:(0.0, 1.0) 开区间,不能等于 0 或 1,默认值为 0.7 模型考虑具有 top_p 概率质量tokens的结果例如:0.1 意味着模型解码器只考虑从前 10% 的概率的候选集中取 tokens 建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。
+ en_US: Another method of temperature sampling is called kernel sampling. The value range is (0.0, 1.0) open interval, which cannot be equal to 0 or 1. The default value is 0.7. The model considers the results with top_p probability mass tokens. For example 0.1 means The model decoder only considers tokens from the candidate set with the top 10% probability. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time.
+ - name: max_tokens
+ use_template: max_tokens
+ default: 1024
+ min: 1
+ max: 4096
diff --git a/api/core/model_runtime/model_providers/zhipuai/text_embedding/embedding-2.yaml b/api/core/model_runtime/model_providers/zhipuai/text_embedding/embedding-2.yaml
index faf0f818c4a782..f1b8b356028e25 100644
--- a/api/core/model_runtime/model_providers/zhipuai/text_embedding/embedding-2.yaml
+++ b/api/core/model_runtime/model_providers/zhipuai/text_embedding/embedding-2.yaml
@@ -1,4 +1,8 @@
model: embedding-2
model_type: text-embedding
model_properties:
- context_size: 512
+ context_size: 8192
+pricing:
+ input: '0.0005'
+ unit: '0.001'
+ currency: RMB
diff --git a/api/core/model_runtime/model_providers/zhipuai/text_embedding/embedding-3.yaml b/api/core/model_runtime/model_providers/zhipuai/text_embedding/embedding-3.yaml
new file mode 100644
index 00000000000000..5c55c911c4bdd1
--- /dev/null
+++ b/api/core/model_runtime/model_providers/zhipuai/text_embedding/embedding-3.yaml
@@ -0,0 +1,8 @@
+model: embedding-3
+model_type: text-embedding
+model_properties:
+ context_size: 8192
+pricing:
+ input: '0.0005'
+ unit: '0.001'
+ currency: RMB
diff --git a/api/core/moderation/input_moderation.py b/api/core/moderation/input_moderation.py
index c5dd88fb2458b1..8157b300b1f6c7 100644
--- a/api/core/moderation/input_moderation.py
+++ b/api/core/moderation/input_moderation.py
@@ -4,7 +4,8 @@
from core.app.app_config.entities import AppConfig
from core.moderation.base import ModerationAction, ModerationException
from core.moderation.factory import ModerationFactory
-from core.ops.ops_trace_manager import TraceQueueManager, TraceTask, TraceTaskName
+from core.ops.entities.trace_entity import TraceTaskName
+from core.ops.ops_trace_manager import TraceQueueManager, TraceTask
from core.ops.utils import measure_time
logger = logging.getLogger(__name__)
diff --git a/api/core/ops/entities/trace_entity.py b/api/core/ops/entities/trace_entity.py
index db7e0806ee8d74..a1443f0691233b 100644
--- a/api/core/ops/entities/trace_entity.py
+++ b/api/core/ops/entities/trace_entity.py
@@ -1,4 +1,5 @@
from datetime import datetime
+from enum import Enum
from typing import Any, Optional, Union
from pydantic import BaseModel, ConfigDict, field_validator
@@ -105,4 +106,15 @@ class GenerateNameTraceInfo(BaseTraceInfo):
'DatasetRetrievalTraceInfo': DatasetRetrievalTraceInfo,
'ToolTraceInfo': ToolTraceInfo,
'GenerateNameTraceInfo': GenerateNameTraceInfo,
-}
\ No newline at end of file
+}
+
+
+class TraceTaskName(str, Enum):
+ CONVERSATION_TRACE = 'conversation'
+ WORKFLOW_TRACE = 'workflow'
+ MESSAGE_TRACE = 'message'
+ MODERATION_TRACE = 'moderation'
+ SUGGESTED_QUESTION_TRACE = 'suggested_question'
+ DATASET_RETRIEVAL_TRACE = 'dataset_retrieval'
+ TOOL_TRACE = 'tool'
+ GENERATE_NAME_TRACE = 'generate_conversation_name'
diff --git a/api/core/ops/langfuse_trace/entities/langfuse_trace_entity.py b/api/core/ops/langfuse_trace/entities/langfuse_trace_entity.py
index b90c05f4cbc605..af7661f0afc9c8 100644
--- a/api/core/ops/langfuse_trace/entities/langfuse_trace_entity.py
+++ b/api/core/ops/langfuse_trace/entities/langfuse_trace_entity.py
@@ -50,10 +50,11 @@ class LangfuseTrace(BaseModel):
"""
Langfuse trace model
"""
+
id: Optional[str] = Field(
default=None,
description="The id of the trace can be set, defaults to a random id. Used to link traces to external systems "
- "or when creating a distributed trace. Traces are upserted on id.",
+ "or when creating a distributed trace. Traces are upserted on id.",
)
name: Optional[str] = Field(
default=None,
@@ -68,7 +69,7 @@ class LangfuseTrace(BaseModel):
metadata: Optional[dict[str, Any]] = Field(
default=None,
description="Additional metadata of the trace. Can be any JSON object. Metadata is merged when being updated "
- "via the API.",
+ "via the API.",
)
user_id: Optional[str] = Field(
default=None,
@@ -81,22 +82,22 @@ class LangfuseTrace(BaseModel):
version: Optional[str] = Field(
default=None,
description="The version of the trace type. Used to understand how changes to the trace type affect metrics. "
- "Useful in debugging.",
+ "Useful in debugging.",
)
release: Optional[str] = Field(
default=None,
description="The release identifier of the current deployment. Used to understand how changes of different "
- "deployments affect metrics. Useful in debugging.",
+ "deployments affect metrics. Useful in debugging.",
)
tags: Optional[list[str]] = Field(
default=None,
description="Tags are used to categorize or label traces. Traces can be filtered by tags in the UI and GET "
- "API. Tags can also be changed in the UI. Tags are merged and never deleted via the API.",
+ "API. Tags can also be changed in the UI. Tags are merged and never deleted via the API.",
)
public: Optional[bool] = Field(
default=None,
description="You can make a trace public to share it via a public link. This allows others to view the trace "
- "without needing to log in or be members of your Langfuse project.",
+ "without needing to log in or be members of your Langfuse project.",
)
@field_validator("input", "output")
@@ -109,6 +110,7 @@ class LangfuseSpan(BaseModel):
"""
Langfuse span model
"""
+
id: Optional[str] = Field(
default=None,
description="The id of the span can be set, otherwise a random id is generated. Spans are upserted on id.",
@@ -140,17 +142,17 @@ class LangfuseSpan(BaseModel):
metadata: Optional[dict[str, Any]] = Field(
default=None,
description="Additional metadata of the span. Can be any JSON object. Metadata is merged when being updated "
- "via the API.",
+ "via the API.",
)
level: Optional[str] = Field(
default=None,
description="The level of the span. Can be DEBUG, DEFAULT, WARNING or ERROR. Used for sorting/filtering of "
- "traces with elevated error levels and for highlighting in the UI.",
+ "traces with elevated error levels and for highlighting in the UI.",
)
status_message: Optional[str] = Field(
default=None,
description="The status message of the span. Additional field for context of the event. E.g. the error "
- "message of an error event.",
+ "message of an error event.",
)
input: Optional[Union[str, dict[str, Any], list, None]] = Field(
default=None, description="The input of the span. Can be any JSON object."
@@ -161,7 +163,7 @@ class LangfuseSpan(BaseModel):
version: Optional[str] = Field(
default=None,
description="The version of the span type. Used to understand how changes to the span type affect metrics. "
- "Useful in debugging.",
+ "Useful in debugging.",
)
parent_observation_id: Optional[str] = Field(
default=None,
@@ -185,10 +187,9 @@ class UnitEnum(str, Enum):
class GenerationUsage(BaseModel):
promptTokens: Optional[int] = None
completionTokens: Optional[int] = None
- totalTokens: Optional[int] = None
+ total: Optional[int] = None
input: Optional[int] = None
output: Optional[int] = None
- total: Optional[int] = None
unit: Optional[UnitEnum] = None
inputCost: Optional[float] = None
outputCost: Optional[float] = None
@@ -224,15 +225,13 @@ class LangfuseGeneration(BaseModel):
completion_start_time: Optional[datetime | str] = Field(
default=None,
description="The time at which the completion started (streaming). Set it to get latency analytics broken "
- "down into time until completion started and completion duration.",
+ "down into time until completion started and completion duration.",
)
end_time: Optional[datetime | str] = Field(
default=None,
description="The time at which the generation ended. Automatically set by generation.end().",
)
- model: Optional[str] = Field(
- default=None, description="The name of the model used for the generation."
- )
+ model: Optional[str] = Field(default=None, description="The name of the model used for the generation.")
model_parameters: Optional[dict[str, Any]] = Field(
default=None,
description="The parameters of the model used for the generation; can be any key-value pairs.",
@@ -248,27 +247,27 @@ class LangfuseGeneration(BaseModel):
usage: Optional[GenerationUsage] = Field(
default=None,
description="The usage object supports the OpenAi structure with tokens and a more generic version with "
- "detailed costs and units.",
+ "detailed costs and units.",
)
metadata: Optional[dict[str, Any]] = Field(
default=None,
description="Additional metadata of the generation. Can be any JSON object. Metadata is merged when being "
- "updated via the API.",
+ "updated via the API.",
)
level: Optional[LevelEnum] = Field(
default=None,
description="The level of the generation. Can be DEBUG, DEFAULT, WARNING or ERROR. Used for sorting/filtering "
- "of traces with elevated error levels and for highlighting in the UI.",
+ "of traces with elevated error levels and for highlighting in the UI.",
)
status_message: Optional[str] = Field(
default=None,
description="The status message of the generation. Additional field for context of the event. E.g. the error "
- "message of an error event.",
+ "message of an error event.",
)
version: Optional[str] = Field(
default=None,
description="The version of the generation type. Used to understand how changes to the span type affect "
- "metrics. Useful in debugging.",
+ "metrics. Useful in debugging.",
)
model_config = ConfigDict(protected_namespaces=())
@@ -277,4 +276,3 @@ class LangfuseGeneration(BaseModel):
def ensure_dict(cls, v, info: ValidationInfo):
field_name = info.field_name
return validate_input_output(v, field_name)
-
diff --git a/api/core/ops/langfuse_trace/langfuse_trace.py b/api/core/ops/langfuse_trace/langfuse_trace.py
index c520fe2aa9c089..698398e0cb8c16 100644
--- a/api/core/ops/langfuse_trace/langfuse_trace.py
+++ b/api/core/ops/langfuse_trace/langfuse_trace.py
@@ -16,6 +16,7 @@
ModerationTraceInfo,
SuggestedQuestionTraceInfo,
ToolTraceInfo,
+ TraceTaskName,
WorkflowTraceInfo,
)
from core.ops.langfuse_trace.entities.langfuse_trace_entity import (
@@ -68,9 +69,9 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo):
user_id = trace_info.metadata.get("user_id")
if trace_info.message_id:
trace_id = trace_info.message_id
- name = f"message_{trace_info.message_id}"
+ name = TraceTaskName.MESSAGE_TRACE.value
trace_data = LangfuseTrace(
- id=trace_info.message_id,
+ id=trace_id,
user_id=user_id,
name=name,
input=trace_info.workflow_run_inputs,
@@ -78,11 +79,13 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo):
metadata=trace_info.metadata,
session_id=trace_info.conversation_id,
tags=["message", "workflow"],
+ created_at=trace_info.start_time,
+ updated_at=trace_info.end_time,
)
self.add_trace(langfuse_trace_data=trace_data)
workflow_span_data = LangfuseSpan(
- id=trace_info.workflow_app_log_id if trace_info.workflow_app_log_id else trace_info.workflow_run_id,
- name=f"workflow_{trace_info.workflow_app_log_id}" if trace_info.workflow_app_log_id else f"workflow_{trace_info.workflow_run_id}",
+ id=(trace_info.workflow_app_log_id if trace_info.workflow_app_log_id else trace_info.workflow_run_id),
+ name=TraceTaskName.WORKFLOW_TRACE.value,
input=trace_info.workflow_run_inputs,
output=trace_info.workflow_run_outputs,
trace_id=trace_id,
@@ -97,7 +100,7 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo):
trace_data = LangfuseTrace(
id=trace_id,
user_id=user_id,
- name=f"workflow_{trace_info.workflow_app_log_id}" if trace_info.workflow_app_log_id else f"workflow_{trace_info.workflow_run_id}",
+ name=TraceTaskName.WORKFLOW_TRACE.value,
input=trace_info.workflow_run_inputs,
output=trace_info.workflow_run_outputs,
metadata=trace_info.metadata,
@@ -134,14 +137,12 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo):
node_type = node_execution.node_type
status = node_execution.status
if node_type == "llm":
- inputs = json.loads(node_execution.process_data).get(
- "prompts", {}
- ) if node_execution.process_data else {}
+ inputs = (
+ json.loads(node_execution.process_data).get("prompts", {}) if node_execution.process_data else {}
+ )
else:
inputs = json.loads(node_execution.inputs) if node_execution.inputs else {}
- outputs = (
- json.loads(node_execution.outputs) if node_execution.outputs else {}
- )
+ outputs = json.loads(node_execution.outputs) if node_execution.outputs else {}
created_at = node_execution.created_at if node_execution.created_at else datetime.now()
elapsed_time = node_execution.elapsed_time
finished_at = created_at + timedelta(seconds=elapsed_time)
@@ -163,28 +164,30 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo):
if trace_info.message_id:
span_data = LangfuseSpan(
id=node_execution_id,
- name=f"{node_name}_{node_execution_id}",
+ name=node_type,
input=inputs,
output=outputs,
trace_id=trace_id,
start_time=created_at,
end_time=finished_at,
metadata=metadata,
- level=LevelEnum.DEFAULT if status == 'succeeded' else LevelEnum.ERROR,
+ level=(LevelEnum.DEFAULT if status == "succeeded" else LevelEnum.ERROR),
status_message=trace_info.error if trace_info.error else "",
- parent_observation_id=trace_info.workflow_app_log_id if trace_info.workflow_app_log_id else trace_info.workflow_run_id,
+ parent_observation_id=(
+ trace_info.workflow_app_log_id if trace_info.workflow_app_log_id else trace_info.workflow_run_id
+ ),
)
else:
span_data = LangfuseSpan(
id=node_execution_id,
- name=f"{node_name}_{node_execution_id}",
+ name=node_type,
input=inputs,
output=outputs,
trace_id=trace_id,
start_time=created_at,
end_time=finished_at,
metadata=metadata,
- level=LevelEnum.DEFAULT if status == 'succeeded' else LevelEnum.ERROR,
+ level=(LevelEnum.DEFAULT if status == "succeeded" else LevelEnum.ERROR),
status_message=trace_info.error if trace_info.error else "",
)
@@ -195,11 +198,11 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo):
total_token = metadata.get("total_tokens", 0)
# add generation
generation_usage = GenerationUsage(
- totalTokens=total_token,
+ total=total_token,
)
node_generation_data = LangfuseGeneration(
- name=f"generation_{node_execution_id}",
+ name="llm",
trace_id=trace_id,
parent_observation_id=node_execution_id,
start_time=created_at,
@@ -207,16 +210,14 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo):
input=inputs,
output=outputs,
metadata=metadata,
- level=LevelEnum.DEFAULT if status == 'succeeded' else LevelEnum.ERROR,
+ level=(LevelEnum.DEFAULT if status == "succeeded" else LevelEnum.ERROR),
status_message=trace_info.error if trace_info.error else "",
usage=generation_usage,
)
self.add_generation(langfuse_generation_data=node_generation_data)
- def message_trace(
- self, trace_info: MessageTraceInfo, **kwargs
- ):
+ def message_trace(self, trace_info: MessageTraceInfo, **kwargs):
# get message file data
file_list = trace_info.file_list
metadata = trace_info.metadata
@@ -225,9 +226,9 @@ def message_trace(
user_id = message_data.from_account_id
if message_data.from_end_user_id:
- end_user_data: EndUser = db.session.query(EndUser).filter(
- EndUser.id == message_data.from_end_user_id
- ).first()
+ end_user_data: EndUser = (
+ db.session.query(EndUser).filter(EndUser.id == message_data.from_end_user_id).first()
+ )
if end_user_data is not None:
user_id = end_user_data.session_id
metadata["user_id"] = user_id
@@ -235,7 +236,7 @@ def message_trace(
trace_data = LangfuseTrace(
id=message_id,
user_id=user_id,
- name=f"message_{message_id}",
+ name=TraceTaskName.MESSAGE_TRACE.value,
input={
"message": trace_info.inputs,
"files": file_list,
@@ -258,7 +259,6 @@ def message_trace(
# start add span
generation_usage = GenerationUsage(
- totalTokens=trace_info.total_tokens,
input=trace_info.message_tokens,
output=trace_info.answer_tokens,
total=trace_info.total_tokens,
@@ -267,7 +267,7 @@ def message_trace(
)
langfuse_generation_data = LangfuseGeneration(
- name=f"generation_{message_id}",
+ name="llm",
trace_id=message_id,
start_time=trace_info.start_time,
end_time=trace_info.end_time,
@@ -275,7 +275,7 @@ def message_trace(
input=trace_info.inputs,
output=message_data.answer,
metadata=metadata,
- level=LevelEnum.DEFAULT if message_data.status != 'error' else LevelEnum.ERROR,
+ level=(LevelEnum.DEFAULT if message_data.status != "error" else LevelEnum.ERROR),
status_message=message_data.error if message_data.error else "",
usage=generation_usage,
)
@@ -284,7 +284,7 @@ def message_trace(
def moderation_trace(self, trace_info: ModerationTraceInfo):
span_data = LangfuseSpan(
- name="moderation",
+ name=TraceTaskName.MODERATION_TRACE.value,
input=trace_info.inputs,
output={
"action": trace_info.action,
@@ -303,22 +303,21 @@ def moderation_trace(self, trace_info: ModerationTraceInfo):
def suggested_question_trace(self, trace_info: SuggestedQuestionTraceInfo):
message_data = trace_info.message_data
generation_usage = GenerationUsage(
- totalTokens=len(str(trace_info.suggested_question)),
+ total=len(str(trace_info.suggested_question)),
input=len(trace_info.inputs),
output=len(trace_info.suggested_question),
- total=len(trace_info.suggested_question),
unit=UnitEnum.CHARACTERS,
)
generation_data = LangfuseGeneration(
- name="suggested_question",
+ name=TraceTaskName.SUGGESTED_QUESTION_TRACE.value,
input=trace_info.inputs,
output=str(trace_info.suggested_question),
trace_id=trace_info.message_id,
start_time=trace_info.start_time,
end_time=trace_info.end_time,
metadata=trace_info.metadata,
- level=LevelEnum.DEFAULT if message_data.status != 'error' else LevelEnum.ERROR,
+ level=(LevelEnum.DEFAULT if message_data.status != "error" else LevelEnum.ERROR),
status_message=message_data.error if message_data.error else "",
usage=generation_usage,
)
@@ -327,7 +326,7 @@ def suggested_question_trace(self, trace_info: SuggestedQuestionTraceInfo):
def dataset_retrieval_trace(self, trace_info: DatasetRetrievalTraceInfo):
dataset_retrieval_span_data = LangfuseSpan(
- name="dataset_retrieval",
+ name=TraceTaskName.DATASET_RETRIEVAL_TRACE.value,
input=trace_info.inputs,
output={"documents": trace_info.documents},
trace_id=trace_info.message_id,
@@ -347,7 +346,7 @@ def tool_trace(self, trace_info: ToolTraceInfo):
start_time=trace_info.start_time,
end_time=trace_info.end_time,
metadata=trace_info.metadata,
- level=LevelEnum.DEFAULT if trace_info.error == "" or trace_info.error is None else LevelEnum.ERROR,
+ level=(LevelEnum.DEFAULT if trace_info.error == "" or trace_info.error is None else LevelEnum.ERROR),
status_message=trace_info.error,
)
@@ -355,7 +354,7 @@ def tool_trace(self, trace_info: ToolTraceInfo):
def generate_name_trace(self, trace_info: GenerateNameTraceInfo):
name_generation_trace_data = LangfuseTrace(
- name="generate_name",
+ name=TraceTaskName.GENERATE_NAME_TRACE.value,
input=trace_info.inputs,
output=trace_info.outputs,
user_id=trace_info.tenant_id,
@@ -366,7 +365,7 @@ def generate_name_trace(self, trace_info: GenerateNameTraceInfo):
self.add_trace(langfuse_trace_data=name_generation_trace_data)
name_generation_span_data = LangfuseSpan(
- name="generate_name",
+ name=TraceTaskName.GENERATE_NAME_TRACE.value,
input=trace_info.inputs,
output=trace_info.outputs,
trace_id=trace_info.conversation_id,
@@ -377,9 +376,7 @@ def generate_name_trace(self, trace_info: GenerateNameTraceInfo):
self.add_span(langfuse_span_data=name_generation_span_data)
def add_trace(self, langfuse_trace_data: Optional[LangfuseTrace] = None):
- format_trace_data = (
- filter_none_values(langfuse_trace_data.model_dump()) if langfuse_trace_data else {}
- )
+ format_trace_data = filter_none_values(langfuse_trace_data.model_dump()) if langfuse_trace_data else {}
try:
self.langfuse_client.trace(**format_trace_data)
logger.debug("LangFuse Trace created successfully")
@@ -387,9 +384,7 @@ def add_trace(self, langfuse_trace_data: Optional[LangfuseTrace] = None):
raise ValueError(f"LangFuse Failed to create trace: {str(e)}")
def add_span(self, langfuse_span_data: Optional[LangfuseSpan] = None):
- format_span_data = (
- filter_none_values(langfuse_span_data.model_dump()) if langfuse_span_data else {}
- )
+ format_span_data = filter_none_values(langfuse_span_data.model_dump()) if langfuse_span_data else {}
try:
self.langfuse_client.span(**format_span_data)
logger.debug("LangFuse Span created successfully")
@@ -397,19 +392,13 @@ def add_span(self, langfuse_span_data: Optional[LangfuseSpan] = None):
raise ValueError(f"LangFuse Failed to create span: {str(e)}")
def update_span(self, span, langfuse_span_data: Optional[LangfuseSpan] = None):
- format_span_data = (
- filter_none_values(langfuse_span_data.model_dump()) if langfuse_span_data else {}
- )
+ format_span_data = filter_none_values(langfuse_span_data.model_dump()) if langfuse_span_data else {}
span.end(**format_span_data)
- def add_generation(
- self, langfuse_generation_data: Optional[LangfuseGeneration] = None
- ):
+ def add_generation(self, langfuse_generation_data: Optional[LangfuseGeneration] = None):
format_generation_data = (
- filter_none_values(langfuse_generation_data.model_dump())
- if langfuse_generation_data
- else {}
+ filter_none_values(langfuse_generation_data.model_dump()) if langfuse_generation_data else {}
)
try:
self.langfuse_client.generation(**format_generation_data)
@@ -417,13 +406,9 @@ def add_generation(
except Exception as e:
raise ValueError(f"LangFuse Failed to create generation: {str(e)}")
- def update_generation(
- self, generation, langfuse_generation_data: Optional[LangfuseGeneration] = None
- ):
+ def update_generation(self, generation, langfuse_generation_data: Optional[LangfuseGeneration] = None):
format_generation_data = (
- filter_none_values(langfuse_generation_data.model_dump())
- if langfuse_generation_data
- else {}
+ filter_none_values(langfuse_generation_data.model_dump()) if langfuse_generation_data else {}
)
generation.end(**format_generation_data)
diff --git a/api/core/ops/langsmith_trace/langsmith_trace.py b/api/core/ops/langsmith_trace/langsmith_trace.py
index 0ce91db335cd94..fde8a06c612dd9 100644
--- a/api/core/ops/langsmith_trace/langsmith_trace.py
+++ b/api/core/ops/langsmith_trace/langsmith_trace.py
@@ -15,6 +15,7 @@
ModerationTraceInfo,
SuggestedQuestionTraceInfo,
ToolTraceInfo,
+ TraceTaskName,
WorkflowTraceInfo,
)
from core.ops.langsmith_trace.entities.langsmith_trace_entity import (
@@ -39,9 +40,7 @@ def __init__(
self.langsmith_key = langsmith_config.api_key
self.project_name = langsmith_config.project
self.project_id = None
- self.langsmith_client = Client(
- api_key=langsmith_config.api_key, api_url=langsmith_config.endpoint
- )
+ self.langsmith_client = Client(api_key=langsmith_config.api_key, api_url=langsmith_config.endpoint)
self.file_base_url = os.getenv("FILES_URL", "http://127.0.0.1:5001")
def trace(self, trace_info: BaseTraceInfo):
@@ -64,7 +63,7 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo):
if trace_info.message_id:
message_run = LangSmithRunModel(
id=trace_info.message_id,
- name=f"message_{trace_info.message_id}",
+ name=TraceTaskName.MESSAGE_TRACE.value,
inputs=trace_info.workflow_run_inputs,
outputs=trace_info.workflow_run_outputs,
run_type=LangSmithRunType.chain,
@@ -73,8 +72,8 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo):
extra={
"metadata": trace_info.metadata,
},
- tags=["message"],
- error=trace_info.error
+ tags=["message", "workflow"],
+ error=trace_info.error,
)
self.add_run(message_run)
@@ -82,7 +81,7 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo):
file_list=trace_info.file_list,
total_tokens=trace_info.total_tokens,
id=trace_info.workflow_app_log_id if trace_info.workflow_app_log_id else trace_info.workflow_run_id,
- name=f"workflow_{trace_info.workflow_app_log_id}" if trace_info.workflow_app_log_id else f"workflow_{trace_info.workflow_run_id}",
+ name=TraceTaskName.WORKFLOW_TRACE.value,
inputs=trace_info.workflow_run_inputs,
run_type=LangSmithRunType.tool,
start_time=trace_info.workflow_data.created_at,
@@ -126,22 +125,18 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo):
node_type = node_execution.node_type
status = node_execution.status
if node_type == "llm":
- inputs = json.loads(node_execution.process_data).get(
- "prompts", {}
- ) if node_execution.process_data else {}
+ inputs = (
+ json.loads(node_execution.process_data).get("prompts", {}) if node_execution.process_data else {}
+ )
else:
inputs = json.loads(node_execution.inputs) if node_execution.inputs else {}
- outputs = (
- json.loads(node_execution.outputs) if node_execution.outputs else {}
- )
+ outputs = json.loads(node_execution.outputs) if node_execution.outputs else {}
created_at = node_execution.created_at if node_execution.created_at else datetime.now()
elapsed_time = node_execution.elapsed_time
finished_at = created_at + timedelta(seconds=elapsed_time)
execution_metadata = (
- json.loads(node_execution.execution_metadata)
- if node_execution.execution_metadata
- else {}
+ json.loads(node_execution.execution_metadata) if node_execution.execution_metadata else {}
)
node_total_tokens = execution_metadata.get("total_tokens", 0)
@@ -168,7 +163,7 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo):
langsmith_run = LangSmithRunModel(
total_tokens=node_total_tokens,
- name=f"{node_name}_{node_execution_id}",
+ name=node_type,
inputs=inputs,
run_type=run_type,
start_time=created_at,
@@ -178,7 +173,9 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo):
extra={
"metadata": metadata,
},
- parent_run_id=trace_info.workflow_app_log_id if trace_info.workflow_app_log_id else trace_info.workflow_run_id,
+ parent_run_id=trace_info.workflow_app_log_id
+ if trace_info.workflow_app_log_id
+ else trace_info.workflow_run_id,
tags=["node_execution"],
)
@@ -198,9 +195,9 @@ def message_trace(self, trace_info: MessageTraceInfo):
metadata["user_id"] = user_id
if message_data.from_end_user_id:
- end_user_data: EndUser = db.session.query(EndUser).filter(
- EndUser.id == message_data.from_end_user_id
- ).first()
+ end_user_data: EndUser = (
+ db.session.query(EndUser).filter(EndUser.id == message_data.from_end_user_id).first()
+ )
if end_user_data is not None:
end_user_id = end_user_data.session_id
metadata["end_user_id"] = end_user_id
@@ -210,7 +207,7 @@ def message_trace(self, trace_info: MessageTraceInfo):
output_tokens=trace_info.answer_tokens,
total_tokens=trace_info.total_tokens,
id=message_id,
- name=f"message_{message_id}",
+ name=TraceTaskName.MESSAGE_TRACE.value,
inputs=trace_info.inputs,
run_type=LangSmithRunType.chain,
start_time=trace_info.start_time,
@@ -230,7 +227,7 @@ def message_trace(self, trace_info: MessageTraceInfo):
input_tokens=trace_info.message_tokens,
output_tokens=trace_info.answer_tokens,
total_tokens=trace_info.total_tokens,
- name=f"llm_{message_id}",
+ name="llm",
inputs=trace_info.inputs,
run_type=LangSmithRunType.llm,
start_time=trace_info.start_time,
@@ -248,7 +245,7 @@ def message_trace(self, trace_info: MessageTraceInfo):
def moderation_trace(self, trace_info: ModerationTraceInfo):
langsmith_run = LangSmithRunModel(
- name="moderation",
+ name=TraceTaskName.MODERATION_TRACE.value,
inputs=trace_info.inputs,
outputs={
"action": trace_info.action,
@@ -271,7 +268,7 @@ def moderation_trace(self, trace_info: ModerationTraceInfo):
def suggested_question_trace(self, trace_info: SuggestedQuestionTraceInfo):
message_data = trace_info.message_data
suggested_question_run = LangSmithRunModel(
- name="suggested_question",
+ name=TraceTaskName.SUGGESTED_QUESTION_TRACE.value,
inputs=trace_info.inputs,
outputs=trace_info.suggested_question,
run_type=LangSmithRunType.tool,
@@ -288,7 +285,7 @@ def suggested_question_trace(self, trace_info: SuggestedQuestionTraceInfo):
def dataset_retrieval_trace(self, trace_info: DatasetRetrievalTraceInfo):
dataset_retrieval_run = LangSmithRunModel(
- name="dataset_retrieval",
+ name=TraceTaskName.DATASET_RETRIEVAL_TRACE.value,
inputs=trace_info.inputs,
outputs={"documents": trace_info.documents},
run_type=LangSmithRunType.retriever,
@@ -323,7 +320,7 @@ def tool_trace(self, trace_info: ToolTraceInfo):
def generate_name_trace(self, trace_info: GenerateNameTraceInfo):
name_run = LangSmithRunModel(
- name="generate_name",
+ name=TraceTaskName.GENERATE_NAME_TRACE.value,
inputs=trace_info.inputs,
outputs=trace_info.outputs,
run_type=LangSmithRunType.tool,
diff --git a/api/core/ops/ops_trace_manager.py b/api/core/ops/ops_trace_manager.py
index 61279e3f5f29c5..068b490ec887bd 100644
--- a/api/core/ops/ops_trace_manager.py
+++ b/api/core/ops/ops_trace_manager.py
@@ -5,7 +5,6 @@
import threading
import time
from datetime import timedelta
-from enum import Enum
from typing import Any, Optional, Union
from uuid import UUID
@@ -24,6 +23,7 @@
ModerationTraceInfo,
SuggestedQuestionTraceInfo,
ToolTraceInfo,
+ TraceTaskName,
WorkflowTraceInfo,
)
from core.ops.langfuse_trace.langfuse_trace import LangFuseDataTrace
@@ -253,17 +253,6 @@ def check_trace_config_is_effective(tracing_config: dict, tracing_provider: str)
return trace_instance(tracing_config).api_check()
-class TraceTaskName(str, Enum):
- CONVERSATION_TRACE = 'conversation_trace'
- WORKFLOW_TRACE = 'workflow_trace'
- MESSAGE_TRACE = 'message_trace'
- MODERATION_TRACE = 'moderation_trace'
- SUGGESTED_QUESTION_TRACE = 'suggested_question_trace'
- DATASET_RETRIEVAL_TRACE = 'dataset_retrieval_trace'
- TOOL_TRACE = 'tool_trace'
- GENERATE_NAME_TRACE = 'generate_name_trace'
-
-
class TraceTask:
def __init__(
self,
diff --git a/api/core/prompt/simple_prompt_transform.py b/api/core/prompt/simple_prompt_transform.py
index 452b270348b2ff..fd7ed0181be2f2 100644
--- a/api/core/prompt/simple_prompt_transform.py
+++ b/api/core/prompt/simple_prompt_transform.py
@@ -1,11 +1,10 @@
import enum
import json
import os
-from typing import Optional
+from typing import TYPE_CHECKING, Optional
from core.app.app_config.entities import PromptTemplateEntity
from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
-from core.file.file_obj import FileVar
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_runtime.entities.message_entities import (
PromptMessage,
@@ -18,6 +17,9 @@
from core.prompt.utils.prompt_template_parser import PromptTemplateParser
from models.model import AppMode
+if TYPE_CHECKING:
+ from core.file.file_obj import FileVar
+
class ModelMode(enum.Enum):
COMPLETION = 'completion'
@@ -50,7 +52,7 @@ def get_prompt(self,
prompt_template_entity: PromptTemplateEntity,
inputs: dict,
query: str,
- files: list[FileVar],
+ files: list["FileVar"],
context: Optional[str],
memory: Optional[TokenBufferMemory],
model_config: ModelConfigWithCredentialsEntity) -> \
@@ -163,7 +165,7 @@ def _get_chat_model_prompt_messages(self, app_mode: AppMode,
inputs: dict,
query: str,
context: Optional[str],
- files: list[FileVar],
+ files: list["FileVar"],
memory: Optional[TokenBufferMemory],
model_config: ModelConfigWithCredentialsEntity) \
-> tuple[list[PromptMessage], Optional[list[str]]]:
@@ -206,7 +208,7 @@ def _get_completion_model_prompt_messages(self, app_mode: AppMode,
inputs: dict,
query: str,
context: Optional[str],
- files: list[FileVar],
+ files: list["FileVar"],
memory: Optional[TokenBufferMemory],
model_config: ModelConfigWithCredentialsEntity) \
-> tuple[list[PromptMessage], Optional[list[str]]]:
@@ -255,7 +257,7 @@ def _get_completion_model_prompt_messages(self, app_mode: AppMode,
return [self.get_last_user_message(prompt, files)], stops
- def get_last_user_message(self, prompt: str, files: list[FileVar]) -> UserPromptMessage:
+ def get_last_user_message(self, prompt: str, files: list["FileVar"]) -> UserPromptMessage:
if files:
prompt_message_contents = [TextPromptMessageContent(data=prompt)]
for file in files:
diff --git a/api/core/rag/datasource/retrieval_service.py b/api/core/rag/datasource/retrieval_service.py
index abbf4a35a4628a..3932e90042c59c 100644
--- a/api/core/rag/datasource/retrieval_service.py
+++ b/api/core/rag/datasource/retrieval_service.py
@@ -28,7 +28,7 @@ class RetrievalService:
@classmethod
def retrieve(cls, retrival_method: str, dataset_id: str, query: str,
top_k: int, score_threshold: Optional[float] = .0,
- reranking_model: Optional[dict] = None, reranking_mode: Optional[str] = None,
+ reranking_model: Optional[dict] = None, reranking_mode: Optional[str] = 'reranking_model',
weights: Optional[dict] = None):
dataset = db.session.query(Dataset).filter(
Dataset.id == dataset_id
@@ -36,10 +36,6 @@ def retrieve(cls, retrival_method: str, dataset_id: str, query: str,
if not dataset or dataset.available_document_count == 0 or dataset.available_segment_count == 0:
return []
all_documents = []
- keyword_search_documents = []
- embedding_search_documents = []
- full_text_search_documents = []
- hybrid_search_documents = []
threads = []
exceptions = []
# retrieval_model source with keyword
diff --git a/api/core/rag/datasource/vdb/analyticdb/analyticdb_vector.py b/api/core/rag/datasource/vdb/analyticdb/analyticdb_vector.py
index 442d71293f632d..b78e2a59b1eb6f 100644
--- a/api/core/rag/datasource/vdb/analyticdb/analyticdb_vector.py
+++ b/api/core/rag/datasource/vdb/analyticdb/analyticdb_vector.py
@@ -65,8 +65,15 @@ def __init__(self, collection_name: str, config: AnalyticdbConfig):
AnalyticdbVector._init = True
def _initialize(self) -> None:
- self._initialize_vector_database()
- self._create_namespace_if_not_exists()
+ cache_key = f"vector_indexing_{self.config.instance_id}"
+ lock_name = f"{cache_key}_lock"
+ with redis_client.lock(lock_name, timeout=20):
+ collection_exist_cache_key = f"vector_indexing_{self.config.instance_id}"
+ if redis_client.get(collection_exist_cache_key):
+ return
+ self._initialize_vector_database()
+ self._create_namespace_if_not_exists()
+ redis_client.set(collection_exist_cache_key, 1, ex=3600)
def _initialize_vector_database(self) -> None:
from alibabacloud_gpdb20160503 import models as gpdb_20160503_models
@@ -285,9 +292,11 @@ def search_by_full_text(self, query: str, **kwargs: Any) -> list[Document]:
documents = []
for match in response.body.matches.match:
if match.score > score_threshold:
+ metadata = json.loads(match.metadata.get("metadata_"))
doc = Document(
page_content=match.metadata.get("page_content"),
- metadata=json.loads(match.metadata.get("metadata_")),
+ vector=match.metadata.get("vector"),
+ metadata=metadata,
)
documents.append(doc)
return documents
@@ -320,7 +329,23 @@ def init_vector(self, dataset: Dataset, attributes: list, embeddings: Embeddings
self.gen_index_struct_dict(VectorType.ANALYTICDB, collection_name)
)
- # TODO handle optional params
+ # handle optional params
+ if dify_config.ANALYTICDB_KEY_ID is None:
+ raise ValueError("ANALYTICDB_KEY_ID should not be None")
+ if dify_config.ANALYTICDB_KEY_SECRET is None:
+ raise ValueError("ANALYTICDB_KEY_SECRET should not be None")
+ if dify_config.ANALYTICDB_REGION_ID is None:
+ raise ValueError("ANALYTICDB_REGION_ID should not be None")
+ if dify_config.ANALYTICDB_INSTANCE_ID is None:
+ raise ValueError("ANALYTICDB_INSTANCE_ID should not be None")
+ if dify_config.ANALYTICDB_ACCOUNT is None:
+ raise ValueError("ANALYTICDB_ACCOUNT should not be None")
+ if dify_config.ANALYTICDB_PASSWORD is None:
+ raise ValueError("ANALYTICDB_PASSWORD should not be None")
+ if dify_config.ANALYTICDB_NAMESPACE is None:
+ raise ValueError("ANALYTICDB_NAMESPACE should not be None")
+ if dify_config.ANALYTICDB_NAMESPACE_PASSWORD is None:
+ raise ValueError("ANALYTICDB_NAMESPACE_PASSWORD should not be None")
return AnalyticdbVector(
collection_name,
AnalyticdbConfig(
diff --git a/api/core/rag/datasource/vdb/elasticsearch/__init__.py b/api/core/rag/datasource/vdb/elasticsearch/__init__.py
new file mode 100644
index 00000000000000..e69de29bb2d1d6
diff --git a/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_vector.py b/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_vector.py
new file mode 100644
index 00000000000000..01ba6fb3248786
--- /dev/null
+++ b/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_vector.py
@@ -0,0 +1,191 @@
+import json
+from typing import Any
+
+import requests
+from elasticsearch import Elasticsearch
+from flask import current_app
+from pydantic import BaseModel, model_validator
+
+from core.rag.datasource.entity.embedding import Embeddings
+from core.rag.datasource.vdb.vector_base import BaseVector
+from core.rag.datasource.vdb.vector_factory import AbstractVectorFactory
+from core.rag.datasource.vdb.vector_type import VectorType
+from core.rag.models.document import Document
+from models.dataset import Dataset
+
+
+class ElasticSearchConfig(BaseModel):
+ host: str
+ port: str
+ username: str
+ password: str
+
+ @model_validator(mode='before')
+ def validate_config(cls, values: dict) -> dict:
+ if not values['host']:
+ raise ValueError("config HOST is required")
+ if not values['port']:
+ raise ValueError("config PORT is required")
+ if not values['username']:
+ raise ValueError("config USERNAME is required")
+ if not values['password']:
+ raise ValueError("config PASSWORD is required")
+ return values
+
+
+class ElasticSearchVector(BaseVector):
+ def __init__(self, index_name: str, config: ElasticSearchConfig, attributes: list):
+ super().__init__(index_name.lower())
+ self._client = self._init_client(config)
+ self._attributes = attributes
+
+ def _init_client(self, config: ElasticSearchConfig) -> Elasticsearch:
+ try:
+ client = Elasticsearch(
+ hosts=f'{config.host}:{config.port}',
+ basic_auth=(config.username, config.password),
+ request_timeout=100000,
+ retry_on_timeout=True,
+ max_retries=10000,
+ )
+ except requests.exceptions.ConnectionError:
+ raise ConnectionError("Vector database connection error")
+
+ return client
+
+ def get_type(self) -> str:
+ return 'elasticsearch'
+
+ def add_texts(self, documents: list[Document], embeddings: list[list[float]], **kwargs):
+ uuids = self._get_uuids(documents)
+ texts = [d.page_content for d in documents]
+ metadatas = [d.metadata for d in documents]
+
+ if not self._client.indices.exists(index=self._collection_name):
+ dim = len(embeddings[0])
+ mapping = {
+ "properties": {
+ "text": {
+ "type": "text"
+ },
+ "vector": {
+ "type": "dense_vector",
+ "index": True,
+ "dims": dim,
+ "similarity": "l2_norm"
+ },
+ }
+ }
+ self._client.indices.create(index=self._collection_name, mappings=mapping)
+
+ added_ids = []
+ for i, text in enumerate(texts):
+ self._client.index(index=self._collection_name,
+ id=uuids[i],
+ document={
+ "text": text,
+ "vector": embeddings[i] if embeddings[i] else None,
+ "metadata": metadatas[i] if metadatas[i] else {},
+ })
+ added_ids.append(uuids[i])
+
+ self._client.indices.refresh(index=self._collection_name)
+ return uuids
+
+ def text_exists(self, id: str) -> bool:
+ return self._client.exists(index=self._collection_name, id=id).__bool__()
+
+ def delete_by_ids(self, ids: list[str]) -> None:
+ for id in ids:
+ self._client.delete(index=self._collection_name, id=id)
+
+ def delete_by_metadata_field(self, key: str, value: str) -> None:
+ query_str = {
+ 'query': {
+ 'match': {
+ f'metadata.{key}': f'{value}'
+ }
+ }
+ }
+ results = self._client.search(index=self._collection_name, body=query_str)
+ ids = [hit['_id'] for hit in results['hits']['hits']]
+ if ids:
+ self.delete_by_ids(ids)
+
+ def delete(self) -> None:
+ self._client.indices.delete(index=self._collection_name)
+
+ def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Document]:
+ query_str = {
+ "query": {
+ "script_score": {
+ "query": {
+ "match_all": {}
+ },
+ "script": {
+ "source": "cosineSimilarity(params.query_vector, 'vector') + 1.0",
+ "params": {
+ "query_vector": query_vector
+ }
+ }
+ }
+ }
+ }
+
+ results = self._client.search(index=self._collection_name, body=query_str)
+
+ docs_and_scores = []
+ for hit in results['hits']['hits']:
+ docs_and_scores.append(
+ (Document(page_content=hit['_source']['text'], metadata=hit['_source']['metadata']), hit['_score']))
+
+ docs = []
+ for doc, score in docs_and_scores:
+            score_threshold = kwargs.get('score_threshold') or 0.0
+ if score > score_threshold:
+ doc.metadata['score'] = score
+ docs.append(doc)
+
+ # Sort the documents by score in descending order
+ docs = sorted(docs, key=lambda x: x.metadata['score'], reverse=True)
+
+ return docs
+ def search_by_full_text(self, query: str, **kwargs: Any) -> list[Document]:
+ query_str = {
+ "match": {
+ "text": query
+ }
+ }
+ results = self._client.search(index=self._collection_name, query=query_str)
+ docs = []
+ for hit in results['hits']['hits']:
+ docs.append(Document(page_content=hit['_source']['text'], metadata=hit['_source']['metadata']))
+
+ return docs
+
+ def create(self, texts: list[Document], embeddings: list[list[float]], **kwargs):
+ return self.add_texts(texts, embeddings, **kwargs)
+
+
+class ElasticSearchVectorFactory(AbstractVectorFactory):
+ def init_vector(self, dataset: Dataset, attributes: list, embeddings: Embeddings) -> ElasticSearchVector:
+ if dataset.index_struct_dict:
+ class_prefix: str = dataset.index_struct_dict['vector_store']['class_prefix']
+ collection_name = class_prefix
+ else:
+ dataset_id = dataset.id
+ collection_name = Dataset.gen_collection_name_by_id(dataset_id)
+ dataset.index_struct = json.dumps(
+ self.gen_index_struct_dict(VectorType.ELASTICSEARCH, collection_name))
+
+ config = current_app.config
+ return ElasticSearchVector(
+ index_name=collection_name,
+ config=ElasticSearchConfig(
+ host=config.get('ELASTICSEARCH_HOST'),
+ port=config.get('ELASTICSEARCH_PORT'),
+ username=config.get('ELASTICSEARCH_USERNAME'),
+ password=config.get('ELASTICSEARCH_PASSWORD'),
+ ),
+ attributes=[]
+ )
diff --git a/api/core/rag/datasource/vdb/myscale/myscale_vector.py b/api/core/rag/datasource/vdb/myscale/myscale_vector.py
index 241b5a8414c476..4ae1a3395b0749 100644
--- a/api/core/rag/datasource/vdb/myscale/myscale_vector.py
+++ b/api/core/rag/datasource/vdb/myscale/myscale_vector.py
@@ -93,7 +93,7 @@ def add_texts(self, documents: list[Document], embeddings: list[list[float]], **
@staticmethod
def escape_str(value: Any) -> str:
- return "".join(f"\\{c}" if c in ("\\", "'") else c for c in str(value))
+ return "".join(" " if c in ("\\", "'") else c for c in str(value))
def text_exists(self, id: str) -> bool:
results = self._client.query(f"SELECT id FROM {self._config.database}.{self._collection_name} WHERE id='{id}'")
@@ -118,7 +118,7 @@ def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Doc
return self._search(f"distance(vector, {str(query_vector)})", self._vec_order, **kwargs)
def search_by_full_text(self, query: str, **kwargs: Any) -> list[Document]:
- return self._search(f"TextSearch(text, '{query}')", SortOrder.DESC, **kwargs)
+ return self._search(f"TextSearch('enable_nlq=false')(text, '{query}')", SortOrder.DESC, **kwargs)
def _search(self, dist: str, order: SortOrder, **kwargs: Any) -> list[Document]:
top_k = kwargs.get("top_k", 5)
@@ -126,13 +126,14 @@ def _search(self, dist: str, order: SortOrder, **kwargs: Any) -> list[Document]:
where_str = f"WHERE dist < {1 - score_threshold}" if \
self._metric.upper() == "COSINE" and order == SortOrder.ASC and score_threshold > 0.0 else ""
sql = f"""
- SELECT text, metadata, {dist} as dist FROM {self._config.database}.{self._collection_name}
+ SELECT text, vector, metadata, {dist} as dist FROM {self._config.database}.{self._collection_name}
{where_str} ORDER BY dist {order.value} LIMIT {top_k}
"""
try:
return [
Document(
page_content=r["text"],
+ vector=r['vector'],
metadata=r["metadata"],
)
for r in self._client.query(sql).named_results()
diff --git a/api/core/rag/datasource/vdb/opensearch/opensearch_vector.py b/api/core/rag/datasource/vdb/opensearch/opensearch_vector.py
index d834e8ce14c51d..c95d202173b84d 100644
--- a/api/core/rag/datasource/vdb/opensearch/opensearch_vector.py
+++ b/api/core/rag/datasource/vdb/opensearch/opensearch_vector.py
@@ -192,7 +192,9 @@ def search_by_full_text(self, query: str, **kwargs: Any) -> list[Document]:
docs = []
for hit in response['hits']['hits']:
metadata = hit['_source'].get(Field.METADATA_KEY.value)
- doc = Document(page_content=hit['_source'].get(Field.CONTENT_KEY.value), metadata=metadata)
+ vector = hit['_source'].get(Field.VECTOR.value)
+ page_content = hit['_source'].get(Field.CONTENT_KEY.value)
+ doc = Document(page_content=page_content, vector=vector, metadata=metadata)
docs.append(doc)
return docs
diff --git a/api/core/rag/datasource/vdb/oracle/oraclevector.py b/api/core/rag/datasource/vdb/oracle/oraclevector.py
index 4bd09b331d2839..aa2c6171c33367 100644
--- a/api/core/rag/datasource/vdb/oracle/oraclevector.py
+++ b/api/core/rag/datasource/vdb/oracle/oraclevector.py
@@ -234,16 +234,16 @@ def search_by_full_text(self, query: str, **kwargs: Any) -> list[Document]:
entities.append(token)
with self._get_cursor() as cur:
cur.execute(
- f"select meta, text FROM {self.table_name} WHERE CONTAINS(text, :1, 1) > 0 order by score(1) desc fetch first {top_k} rows only",
+ f"select meta, text, embedding FROM {self.table_name} WHERE CONTAINS(text, :1, 1) > 0 order by score(1) desc fetch first {top_k} rows only",
[" ACCUM ".join(entities)]
)
docs = []
for record in cur:
- metadata, text = record
- docs.append(Document(page_content=text, metadata=metadata))
+ metadata, text, embedding = record
+ docs.append(Document(page_content=text, vector=embedding, metadata=metadata))
return docs
else:
- return [Document(page_content="", metadata="")]
+ return [Document(page_content="", metadata={})]
return []
def delete(self) -> None:
diff --git a/api/core/rag/datasource/vdb/qdrant/qdrant_vector.py b/api/core/rag/datasource/vdb/qdrant/qdrant_vector.py
index 77c3f6a27122f4..297bff928e8ae8 100644
--- a/api/core/rag/datasource/vdb/qdrant/qdrant_vector.py
+++ b/api/core/rag/datasource/vdb/qdrant/qdrant_vector.py
@@ -399,7 +399,6 @@ def search_by_full_text(self, query: str, **kwargs: Any) -> list[Document]:
document = self._document_from_scored_point(
result, Field.CONTENT_KEY.value, Field.METADATA_KEY.value
)
- document.metadata['vector'] = result.vector
documents.append(document)
return documents
@@ -418,6 +417,7 @@ def _document_from_scored_point(
) -> Document:
return Document(
page_content=scored_point.payload.get(content_payload_key),
+ vector=scored_point.vector,
metadata=scored_point.payload.get(metadata_payload_key) or {},
)
diff --git a/api/core/rag/datasource/vdb/vector_factory.py b/api/core/rag/datasource/vdb/vector_factory.py
index fad60ecf45c151..3e9ca8e1fe7f4a 100644
--- a/api/core/rag/datasource/vdb/vector_factory.py
+++ b/api/core/rag/datasource/vdb/vector_factory.py
@@ -71,6 +71,9 @@ def get_vector_factory(vector_type: str) -> type[AbstractVectorFactory]:
case VectorType.RELYT:
from core.rag.datasource.vdb.relyt.relyt_vector import RelytVectorFactory
return RelytVectorFactory
+ case VectorType.ELASTICSEARCH:
+ from core.rag.datasource.vdb.elasticsearch.elasticsearch_vector import ElasticSearchVectorFactory
+ return ElasticSearchVectorFactory
case VectorType.TIDB_VECTOR:
from core.rag.datasource.vdb.tidb_vector.tidb_vector import TiDBVectorFactory
return TiDBVectorFactory
diff --git a/api/core/rag/datasource/vdb/vector_type.py b/api/core/rag/datasource/vdb/vector_type.py
index 77495044df562c..317ca6abc8c89d 100644
--- a/api/core/rag/datasource/vdb/vector_type.py
+++ b/api/core/rag/datasource/vdb/vector_type.py
@@ -15,3 +15,4 @@ class VectorType(str, Enum):
OPENSEARCH = 'opensearch'
TENCENT = 'tencent'
ORACLE = 'oracle'
+ ELASTICSEARCH = 'elasticsearch'
diff --git a/api/core/rag/datasource/vdb/weaviate/weaviate_vector.py b/api/core/rag/datasource/vdb/weaviate/weaviate_vector.py
index 87fc5ff158e2d3..205fe850c35838 100644
--- a/api/core/rag/datasource/vdb/weaviate/weaviate_vector.py
+++ b/api/core/rag/datasource/vdb/weaviate/weaviate_vector.py
@@ -239,8 +239,7 @@ def search_by_full_text(self, query: str, **kwargs: Any) -> list[Document]:
query_obj = self._client.query.get(collection_name, properties)
if kwargs.get("where_filter"):
query_obj = query_obj.with_where(kwargs.get("where_filter"))
- if kwargs.get("additional"):
- query_obj = query_obj.with_additional(kwargs.get("additional"))
+ query_obj = query_obj.with_additional(["vector"])
properties = ['text']
result = query_obj.with_bm25(query=query, properties=properties).with_limit(kwargs.get('top_k', 2)).do()
if "errors" in result:
@@ -248,7 +247,8 @@ def search_by_full_text(self, query: str, **kwargs: Any) -> list[Document]:
docs = []
for res in result["data"]["Get"][collection_name]:
text = res.pop(Field.TEXT_KEY.value)
- docs.append(Document(page_content=text, metadata=res))
+ additional = res.pop('_additional')
+ docs.append(Document(page_content=text, vector=additional['vector'], metadata=res))
return docs
def _default_schema(self, index_name: str) -> dict:
diff --git a/api/core/rag/extractor/unstructured/unstructured_doc_extractor.py b/api/core/rag/extractor/unstructured/unstructured_doc_extractor.py
index 34a4e85e9720ce..0323b14a4a34fd 100644
--- a/api/core/rag/extractor/unstructured/unstructured_doc_extractor.py
+++ b/api/core/rag/extractor/unstructured/unstructured_doc_extractor.py
@@ -25,7 +25,7 @@ def extract(self) -> list[Document]:
from unstructured.file_utils.filetype import FileType, detect_filetype
unstructured_version = tuple(
- [int(x) for x in __unstructured_version__.split(".")]
+ int(x) for x in __unstructured_version__.split(".")
)
# check the file extension
try:
diff --git a/api/core/rag/extractor/word_extractor.py b/api/core/rag/extractor/word_extractor.py
index ac4a56319b5142..c3f0b75cfba5f1 100644
--- a/api/core/rag/extractor/word_extractor.py
+++ b/api/core/rag/extractor/word_extractor.py
@@ -1,9 +1,12 @@
"""Abstract interface for document loader implementations."""
import datetime
+import logging
import mimetypes
import os
+import re
import tempfile
import uuid
+import xml.etree.ElementTree as ET
from urllib.parse import urlparse
import requests
@@ -16,6 +19,7 @@
from extensions.ext_storage import storage
from models.model import UploadFile
+logger = logging.getLogger(__name__)
class WordExtractor(BaseExtractor):
"""Load docx files.
@@ -117,19 +121,63 @@ def _extract_images_from_docx(self, doc, image_folder):
return image_map
- def _table_to_markdown(self, table):
- markdown = ""
- # deal with table headers
+ def _table_to_markdown(self, table, image_map):
+ markdown = []
+ # calculate the total number of columns
+ total_cols = max(len(row.cells) for row in table.rows)
+
header_row = table.rows[0]
- headers = [cell.text for cell in header_row.cells]
- markdown += "| " + " | ".join(headers) + " |\n"
- markdown += "| " + " | ".join(["---"] * len(headers)) + " |\n"
- # deal with table rows
+ headers = self._parse_row(header_row, image_map, total_cols)
+ markdown.append("| " + " | ".join(headers) + " |")
+ markdown.append("| " + " | ".join(["---"] * total_cols) + " |")
+
for row in table.rows[1:]:
- row_cells = [cell.text for cell in row.cells]
- markdown += "| " + " | ".join(row_cells) + " |\n"
+ row_cells = self._parse_row(row, image_map, total_cols)
+ markdown.append("| " + " | ".join(row_cells) + " |")
+ return "\n".join(markdown)
- return markdown
+ def _parse_row(self, row, image_map, total_cols):
+ # Initialize a row, all of which are empty by default
+ row_cells = [""] * total_cols
+ col_index = 0
+ for cell in row.cells:
+ # make sure the col_index is not out of range
+ while col_index < total_cols and row_cells[col_index] != "":
+ col_index += 1
+ # if col_index is out of range the loop is jumped
+ if col_index >= total_cols:
+ break
+ cell_content = self._parse_cell(cell, image_map).strip()
+ cell_colspan = cell.grid_span if cell.grid_span else 1
+ for i in range(cell_colspan):
+ if col_index + i < total_cols:
+ row_cells[col_index + i] = cell_content if i == 0 else ""
+ col_index += cell_colspan
+ return row_cells
+
+ def _parse_cell(self, cell, image_map):
+ cell_content = []
+ for paragraph in cell.paragraphs:
+ parsed_paragraph = self._parse_cell_paragraph(paragraph, image_map)
+ if parsed_paragraph:
+ cell_content.append(parsed_paragraph)
+ unique_content = list(dict.fromkeys(cell_content))
+ return " ".join(unique_content)
+
+ def _parse_cell_paragraph(self, paragraph, image_map):
+ paragraph_content = []
+ for run in paragraph.runs:
+ if run.element.xpath('.//a:blip'):
+ for blip in run.element.xpath('.//a:blip'):
+ image_id = blip.get("{http://schemas.openxmlformats.org/officeDocument/2006/relationships}embed")
+ image_part = paragraph.part.rels[image_id].target_part
+
+ if image_part in image_map:
+ image_link = image_map[image_part]
+ paragraph_content.append(image_link)
+ else:
+ paragraph_content.append(run.text)
+ return "".join(paragraph_content).strip()
def _parse_paragraph(self, paragraph, image_map):
paragraph_content = []
@@ -153,10 +201,34 @@ def parse_docx(self, docx_path, image_folder):
image_map = self._extract_images_from_docx(doc, image_folder)
+ hyperlinks_url = None
+    url_pattern = re.compile(r'http://[^\s]+|https://[^\s]+')
+ for para in doc.paragraphs:
+ for run in para.runs:
+ if run.text and hyperlinks_url:
+ result = f' [{run.text}]({hyperlinks_url}) '
+ run.text = result
+ hyperlinks_url = None
+ if 'HYPERLINK' in run.element.xml:
+ try:
+ xml = ET.XML(run.element.xml)
+ x_child = [c for c in xml.iter() if c is not None]
+ for x in x_child:
+                        if x is None:
+ continue
+ if x.tag.endswith('instrText'):
+                            for i in url_pattern.findall(x.text or ''):
+ hyperlinks_url = str(i)
+ except Exception as e:
+ logger.error(e)
+
+
+
+
def parse_paragraph(paragraph):
paragraph_content = []
for run in paragraph.runs:
- if run.element.tag.endswith('r'):
+            if hasattr(run.element, 'tag') and isinstance(run.element.tag, str) and run.element.tag.endswith('r'):
drawing_elements = run.element.findall(
'.//{http://schemas.openxmlformats.org/wordprocessingml/2006/main}drawing')
for drawing in drawing_elements:
@@ -176,13 +248,14 @@ def parse_paragraph(paragraph):
paragraphs = doc.paragraphs.copy()
tables = doc.tables.copy()
for element in doc.element.body:
- if element.tag.endswith('p'): # paragraph
- para = paragraphs.pop(0)
- parsed_paragraph = parse_paragraph(para)
- if parsed_paragraph:
- content.append(parsed_paragraph)
- elif element.tag.endswith('tbl'): # table
- table = tables.pop(0)
- content.append(self._table_to_markdown(table))
+ if hasattr(element, 'tag'):
+ if isinstance(element.tag, str) and element.tag.endswith('p'): # paragraph
+ para = paragraphs.pop(0)
+ parsed_paragraph = parse_paragraph(para)
+ if parsed_paragraph:
+ content.append(parsed_paragraph)
+ elif isinstance(element.tag, str) and element.tag.endswith('tbl'): # table
+ table = tables.pop(0)
+                    content.append(self._table_to_markdown(table, image_map))
return '\n'.join(content)
diff --git a/api/core/rag/models/document.py b/api/core/rag/models/document.py
index 7bb675b149c0e6..6f3c1c5d343977 100644
--- a/api/core/rag/models/document.py
+++ b/api/core/rag/models/document.py
@@ -10,6 +10,8 @@ class Document(BaseModel):
page_content: str
+ vector: Optional[list[float]] = None
+
"""Arbitrary metadata about the page content (e.g., source, relationships to other
documents, etc.).
"""
diff --git a/api/core/rag/rerank/weight_rerank.py b/api/core/rag/rerank/weight_rerank.py
index d07f94adb77250..d8a78739826a31 100644
--- a/api/core/rag/rerank/weight_rerank.py
+++ b/api/core/rag/rerank/weight_rerank.py
@@ -159,10 +159,9 @@ def _calculate_cosine(self, tenant_id: str, query: str, documents: list[Document
if 'score' in document.metadata:
query_vector_scores.append(document.metadata['score'])
else:
- content_vector = document.metadata['vector']
# transform to NumPy
vec1 = np.array(query_vector)
- vec2 = np.array(document.metadata['vector'])
+ vec2 = np.array(document.vector)
# calculate dot product
dot_product = np.dot(vec1, vec2)
diff --git a/api/core/rag/retrieval/dataset_retrieval.py b/api/core/rag/retrieval/dataset_retrieval.py
index 2c5d920a9ad9d2..e9453647969a97 100644
--- a/api/core/rag/retrieval/dataset_retrieval.py
+++ b/api/core/rag/retrieval/dataset_retrieval.py
@@ -14,7 +14,8 @@
from core.model_runtime.entities.message_entities import PromptMessageTool
from core.model_runtime.entities.model_entities import ModelFeature, ModelType
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
-from core.ops.ops_trace_manager import TraceQueueManager, TraceTask, TraceTaskName
+from core.ops.entities.trace_entity import TraceTaskName
+from core.ops.ops_trace_manager import TraceQueueManager, TraceTask
from core.ops.utils import measure_time
from core.rag.data_post_processor.data_post_processor import DataPostProcessor
from core.rag.datasource.keyword.jieba.jieba_keyword_table_handler import JiebaKeywordTableHandler
@@ -278,6 +279,7 @@ def single_retrieve(
query=query,
top_k=top_k, score_threshold=score_threshold,
reranking_model=reranking_model,
+ reranking_mode=retrieval_model_config.get('reranking_mode', 'reranking_model'),
weights=retrieval_model_config.get('weights', None),
)
self._on_query(query, [dataset_id], app_id, user_from, user_id)
@@ -431,10 +433,12 @@ def _retriever(self, flask_app: Flask, dataset_id: str, query: str, top_k: int,
dataset_id=dataset.id,
query=query,
top_k=top_k,
- score_threshold=retrieval_model['score_threshold']
+ score_threshold=retrieval_model.get('score_threshold', .0)
if retrieval_model['score_threshold_enabled'] else None,
- reranking_model=retrieval_model['reranking_model']
+ reranking_model=retrieval_model.get('reranking_model', None)
if retrieval_model['reranking_enable'] else None,
+ reranking_mode=retrieval_model.get('reranking_mode')
+ if retrieval_model.get('reranking_mode') else 'reranking_model',
weights=retrieval_model.get('weights', None),
)
diff --git a/api/core/rag/splitter/fixed_text_splitter.py b/api/core/rag/splitter/fixed_text_splitter.py
index fd714edf5e435b..6a0804f890db39 100644
--- a/api/core/rag/splitter/fixed_text_splitter.py
+++ b/api/core/rag/splitter/fixed_text_splitter.py
@@ -63,7 +63,7 @@ def split_text(self, text: str) -> list[str]:
if self._fixed_separator:
chunks = text.split(self._fixed_separator)
else:
- chunks = list(text)
+ chunks = [text]
final_chunks = []
for chunk in chunks:
diff --git a/api/core/tools/entities/tool_entities.py b/api/core/tools/entities/tool_entities.py
index 569a1d3238f87f..2e4433d9f6d2b9 100644
--- a/api/core/tools/entities/tool_entities.py
+++ b/api/core/tools/entities/tool_entities.py
@@ -46,7 +46,7 @@ def value_of(cls, value: str) -> 'ToolProviderType':
if mode.value == value:
return mode
raise ValueError(f'invalid mode value {value}')
-
+
class ApiProviderSchemaType(Enum):
"""
Enum class for api provider schema type.
@@ -68,7 +68,7 @@ def value_of(cls, value: str) -> 'ApiProviderSchemaType':
if mode.value == value:
return mode
raise ValueError(f'invalid mode value {value}')
-
+
class ApiProviderAuthType(Enum):
"""
Enum class for api provider auth type.
@@ -103,8 +103,8 @@ class MessageType(Enum):
"""
plain text, image url or link url
"""
- message: Union[str, bytes, dict] = None
- meta: dict[str, Any] = None
+ message: str | bytes | dict | None = None
+ meta: dict[str, Any] | None = None
save_as: str = ''
class ToolInvokeMessageBinary(BaseModel):
@@ -154,8 +154,8 @@ class ToolParameterForm(Enum):
options: Optional[list[ToolParameterOption]] = None
@classmethod
- def get_simple_instance(cls,
- name: str, llm_description: str, type: ToolParameterType,
+ def get_simple_instance(cls,
+ name: str, llm_description: str, type: ToolParameterType,
required: bool, options: Optional[list[str]] = None) -> 'ToolParameter':
"""
get a simple tool parameter
@@ -222,7 +222,7 @@ def value_of(cls, value: str) -> "ToolProviderCredentials.CredentialsType":
if mode.value == value:
return mode
raise ValueError(f'invalid mode value {value}')
-
+
@staticmethod
def default(value: str) -> str:
return ""
@@ -290,7 +290,7 @@ def dict(self) -> dict:
'tenant_id': self.tenant_id,
'pool': [variable.model_dump() for variable in self.pool],
}
-
+
def set_text(self, tool_name: str, name: str, value: str) -> None:
"""
set a text variable
@@ -301,7 +301,7 @@ def set_text(self, tool_name: str, name: str, value: str) -> None:
variable = cast(ToolRuntimeTextVariable, variable)
variable.value = value
return
-
+
variable = ToolRuntimeTextVariable(
type=ToolRuntimeVariableType.TEXT,
name=name,
@@ -334,7 +334,7 @@ def set_file(self, tool_name: str, value: str, name: str = None) -> None:
variable = cast(ToolRuntimeImageVariable, variable)
variable.value = value
return
-
+
variable = ToolRuntimeImageVariable(
type=ToolRuntimeVariableType.IMAGE,
name=name,
@@ -388,21 +388,21 @@ def empty(cls) -> 'ToolInvokeMeta':
Get an empty instance of ToolInvokeMeta
"""
return cls(time_cost=0.0, error=None, tool_config={})
-
+
@classmethod
def error_instance(cls, error: str) -> 'ToolInvokeMeta':
"""
Get an instance of ToolInvokeMeta with error
"""
return cls(time_cost=0.0, error=error, tool_config={})
-
+
def to_dict(self) -> dict:
return {
'time_cost': self.time_cost,
'error': self.error,
'tool_config': self.tool_config,
}
-
+
class ToolLabel(BaseModel):
"""
Tool label
@@ -416,4 +416,4 @@ class ToolInvokeFrom(Enum):
Enum class for tool invoke
"""
WORKFLOW = "workflow"
- AGENT = "agent"
\ No newline at end of file
+ AGENT = "agent"
diff --git a/api/core/tools/provider/builtin/_positions.py b/api/core/tools/provider/builtin/_positions.py
index ae806eaff4a032..062668fc5bf8bf 100644
--- a/api/core/tools/provider/builtin/_positions.py
+++ b/api/core/tools/provider/builtin/_positions.py
@@ -1,6 +1,6 @@
import os.path
-from core.helper.position_helper import get_position_map, sort_by_position_map
+from core.helper.position_helper import get_tool_position_map, sort_by_position_map
from core.tools.entities.api_entities import UserToolProvider
@@ -10,11 +10,11 @@ class BuiltinToolProviderSort:
@classmethod
def sort(cls, providers: list[UserToolProvider]) -> list[UserToolProvider]:
if not cls._position:
- cls._position = get_position_map(os.path.join(os.path.dirname(__file__), '..'))
+ cls._position = get_tool_position_map(os.path.join(os.path.dirname(__file__), '..'))
def name_func(provider: UserToolProvider) -> str:
return provider.name
sorted_providers = sort_by_position_map(cls._position, providers, name_func)
- return sorted_providers
\ No newline at end of file
+ return sorted_providers
diff --git a/api/core/tools/provider/builtin/did/_assets/icon.svg b/api/core/tools/provider/builtin/did/_assets/icon.svg
new file mode 100644
index 00000000000000..c477d7cb71dea2
--- /dev/null
+++ b/api/core/tools/provider/builtin/did/_assets/icon.svg
@@ -0,0 +1,14 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/api/core/tools/provider/builtin/did/did.py b/api/core/tools/provider/builtin/did/did.py
new file mode 100644
index 00000000000000..b4bf172131448d
--- /dev/null
+++ b/api/core/tools/provider/builtin/did/did.py
@@ -0,0 +1,21 @@
+from core.tools.errors import ToolProviderCredentialValidationError
+from core.tools.provider.builtin.did.tools.talks import TalksTool
+from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController
+
+
+class DIDProvider(BuiltinToolProviderController):
+ def _validate_credentials(self, credentials: dict) -> None:
+ try:
+ # Example validation using the D-ID talks tool
+ TalksTool().fork_tool_runtime(
+ runtime={"credentials": credentials}
+ ).invoke(
+ user_id='',
+ tool_parameters={
+ "source_url": "https://www.d-id.com/wp-content/uploads/2023/11/Hero-image-1.png",
+ "text_input": "Hello, welcome to use D-ID tool in Dify",
+ }
+ )
+ except Exception as e:
+ raise ToolProviderCredentialValidationError(str(e))
+
\ No newline at end of file
diff --git a/api/core/tools/provider/builtin/did/did.yaml b/api/core/tools/provider/builtin/did/did.yaml
new file mode 100644
index 00000000000000..a70b71812e4648
--- /dev/null
+++ b/api/core/tools/provider/builtin/did/did.yaml
@@ -0,0 +1,28 @@
+identity:
+ author: Matri Qi
+ name: did
+ label:
+ en_US: D-ID
+ description:
+ en_US: D-ID is a tool enabling the creation of high-quality, custom videos of Digital Humans from a single image.
+ icon: icon.svg
+ tags:
+ - videos
+credentials_for_provider:
+ did_api_key:
+ type: secret-input
+ required: true
+ label:
+ en_US: D-ID API Key
+ placeholder:
+ en_US: Please input your D-ID API key
+ help:
+ en_US: Get your D-ID API key from your D-ID account settings.
+ url: https://studio.d-id.com/account-settings
+ base_url:
+ type: text-input
+ required: false
+ label:
+ en_US: D-ID server's Base URL
+ placeholder:
+ en_US: https://api.d-id.com
diff --git a/api/core/tools/provider/builtin/did/did_appx.py b/api/core/tools/provider/builtin/did/did_appx.py
new file mode 100644
index 00000000000000..964e82b729319e
--- /dev/null
+++ b/api/core/tools/provider/builtin/did/did_appx.py
@@ -0,0 +1,87 @@
+import logging
+import time
+from collections.abc import Mapping
+from typing import Any
+
+import requests
+from requests.exceptions import HTTPError
+
+logger = logging.getLogger(__name__)
+
+
+class DIDApp:
+ def __init__(self, api_key: str | None = None, base_url: str | None = None):
+ self.api_key = api_key
+ self.base_url = base_url or 'https://api.d-id.com'
+ if not self.api_key:
+ raise ValueError('API key is required')
+
+ def _prepare_headers(self, idempotency_key: str | None = None):
+ headers = {'Content-Type': 'application/json', 'Authorization': f'Basic {self.api_key}'}
+ if idempotency_key:
+ headers['Idempotency-Key'] = idempotency_key
+ return headers
+
+ def _request(
+ self,
+ method: str,
+ url: str,
+ data: Mapping[str, Any] | None = None,
+ headers: Mapping[str, str] | None = None,
+ retries: int = 3,
+ backoff_factor: float = 0.3,
+ ) -> Mapping[str, Any] | None:
+ for i in range(retries):
+ try:
+ response = requests.request(method, url, json=data, headers=headers)
+ response.raise_for_status()
+ return response.json()
+ except requests.exceptions.RequestException as e:
+ if i < retries - 1 and isinstance(e, HTTPError) and e.response.status_code >= 500:
+ time.sleep(backoff_factor * (2**i))
+ else:
+ raise
+ return None
+
+ def talks(self, wait: bool = True, poll_interval: int = 5, idempotency_key: str | None = None, **kwargs):
+ endpoint = f'{self.base_url}/talks'
+ headers = self._prepare_headers(idempotency_key)
+ data = kwargs['params']
+ logger.debug(f'Send request to {endpoint=} body={data}')
+ response = self._request('POST', endpoint, data, headers)
+ if response is None:
+ raise HTTPError('Failed to initiate D-ID talks after multiple retries')
+ id: str = response['id']
+ if wait:
+ return self._monitor_job_status(id=id, target='talks', poll_interval=poll_interval)
+ return id
+
+ def animations(self, wait: bool = True, poll_interval: int = 5, idempotency_key: str | None = None, **kwargs):
+ endpoint = f'{self.base_url}/animations'
+ headers = self._prepare_headers(idempotency_key)
+ data = kwargs['params']
+ logger.debug(f'Send request to {endpoint=} body={data}')
+ response = self._request('POST', endpoint, data, headers)
+ if response is None:
+ raise HTTPError('Failed to initiate D-ID talks after multiple retries')
+ id: str = response['id']
+ if wait:
+ return self._monitor_job_status(target='animations', id=id, poll_interval=poll_interval)
+ return id
+
+ def check_did_status(self, target: str, id: str):
+ endpoint = f'{self.base_url}/{target}/{id}'
+ headers = self._prepare_headers()
+ response = self._request('GET', endpoint, headers=headers)
+ if response is None:
+ raise HTTPError(f'Failed to check status for talks {id} after multiple retries')
+ return response
+
+ def _monitor_job_status(self, target: str, id: str, poll_interval: int):
+ while True:
+ status = self.check_did_status(target=target, id=id)
+ if status['status'] == 'done':
+ return status
+ elif status['status'] == 'error' or status['status'] == 'rejected':
+ raise HTTPError(f'Talks {id} failed: {status["status"]} {status.get("error",{}).get("description")}')
+ time.sleep(poll_interval)
diff --git a/api/core/tools/provider/builtin/did/tools/animations.py b/api/core/tools/provider/builtin/did/tools/animations.py
new file mode 100644
index 00000000000000..e1d9de603fbb7a
--- /dev/null
+++ b/api/core/tools/provider/builtin/did/tools/animations.py
@@ -0,0 +1,49 @@
+import json
+from typing import Any, Union
+
+from core.tools.entities.tool_entities import ToolInvokeMessage
+from core.tools.provider.builtin.did.did_appx import DIDApp
+from core.tools.tool.builtin_tool import BuiltinTool
+
+
+class AnimationsTool(BuiltinTool):
+ def _invoke(
+ self, user_id: str, tool_parameters: dict[str, Any]
+ ) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
+ app = DIDApp(api_key=self.runtime.credentials['did_api_key'], base_url=self.runtime.credentials['base_url'])
+
+ driver_expressions_str = tool_parameters.get('driver_expressions')
+ driver_expressions = json.loads(driver_expressions_str) if driver_expressions_str else None
+
+ config = {
+ 'stitch': tool_parameters.get('stitch', True),
+ 'mute': tool_parameters.get('mute'),
+ 'result_format': tool_parameters.get('result_format') or 'mp4',
+ }
+ config = {k: v for k, v in config.items() if v is not None and v != ''}
+
+ options = {
+ 'source_url': tool_parameters['source_url'],
+ 'driver_url': tool_parameters.get('driver_url'),
+ 'config': config,
+ }
+ options = {k: v for k, v in options.items() if v is not None and v != ''}
+
+ if not options.get('source_url'):
+ raise ValueError('Source URL is required')
+
+ if config.get('logo_url'):
+ if not config.get('logo_x'):
+ raise ValueError('Logo X position is required when logo URL is provided')
+ if not config.get('logo_y'):
+ raise ValueError('Logo Y position is required when logo URL is provided')
+
+ animations_result = app.animations(params=options, wait=True)
+
+ if not isinstance(animations_result, str):
+ animations_result = json.dumps(animations_result, ensure_ascii=False, indent=4)
+
+ if not animations_result:
+ return self.create_text_message('D-ID animations request failed.')
+
+ return self.create_text_message(animations_result)
diff --git a/api/core/tools/provider/builtin/did/tools/animations.yaml b/api/core/tools/provider/builtin/did/tools/animations.yaml
new file mode 100644
index 00000000000000..2a2036c7b2a88f
--- /dev/null
+++ b/api/core/tools/provider/builtin/did/tools/animations.yaml
@@ -0,0 +1,86 @@
+identity:
+ name: animations
+ author: Matri Qi
+ label:
+ en_US: Animations
+description:
+ human:
+    en_US: Animations enables the creation of videos matching head movements, expressions, emotions, and voice from a driver video and image.
+  llm: Animations enables the creation of videos matching head movements, expressions, emotions, and voice from a driver video and image.
+parameters:
+ - name: source_url
+ type: string
+ required: true
+ label:
+ en_US: source url
+ human_description:
+ en_US: The URL of the source image to be animated by the driver video, or a selection from the list of provided studio actors.
+ llm_description: The URL of the source image to be animated by the driver video, or a selection from the list of provided studio actors.
+ form: llm
+ - name: driver_url
+ type: string
+ required: false
+ label:
+ en_US: driver url
+ human_description:
+ en_US: The URL of the driver video to drive the animation, or a provided driver name from D-ID.
+ form: form
+ - name: mute
+ type: boolean
+ required: false
+ label:
+ en_US: mute
+ human_description:
+ en_US: Mutes the driver sound in the animated video result, defaults to true
+ form: form
+ - name: stitch
+ type: boolean
+ required: false
+ label:
+ en_US: stitch
+ human_description:
+      en_US: If enabled, the driver video will be stitched with the animated head video.
+ form: form
+ - name: logo_url
+ type: string
+ required: false
+ label:
+ en_US: logo url
+ human_description:
+ en_US: The URL of the logo image to be added to the animation video.
+ form: form
+ - name: logo_x
+ type: number
+ required: false
+ label:
+ en_US: logo position x
+ human_description:
+ en_US: The x position of the logo image in the animation video. It's required when logo url is provided.
+ form: form
+ - name: logo_y
+ type: number
+ required: false
+ label:
+ en_US: logo position y
+ human_description:
+ en_US: The y position of the logo image in the animation video. It's required when logo url is provided.
+ form: form
+ - name: result_format
+ type: string
+ default: mp4
+ required: false
+ label:
+ en_US: result format
+ human_description:
+ en_US: The format of the result video.
+ form: form
+ options:
+ - value: mp4
+ label:
+ en_US: mp4
+ - value: gif
+ label:
+ en_US: gif
+ - value: mov
+ label:
+ en_US: mov
diff --git a/api/core/tools/provider/builtin/did/tools/talks.py b/api/core/tools/provider/builtin/did/tools/talks.py
new file mode 100644
index 00000000000000..06b2c4cb2f6049
--- /dev/null
+++ b/api/core/tools/provider/builtin/did/tools/talks.py
@@ -0,0 +1,65 @@
+import json
+from typing import Any, Union
+
+from core.tools.entities.tool_entities import ToolInvokeMessage
+from core.tools.provider.builtin.did.did_appx import DIDApp
+from core.tools.tool.builtin_tool import BuiltinTool
+
+
+class TalksTool(BuiltinTool):
+ def _invoke(
+ self, user_id: str, tool_parameters: dict[str, Any]
+ ) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
+ app = DIDApp(api_key=self.runtime.credentials['did_api_key'], base_url=self.runtime.credentials['base_url'])
+
+ driver_expressions_str = tool_parameters.get('driver_expressions')
+ driver_expressions = json.loads(driver_expressions_str) if driver_expressions_str else None
+
+ script = {
+ 'type': tool_parameters.get('script_type') or 'text',
+ 'input': tool_parameters.get('text_input'),
+ 'audio_url': tool_parameters.get('audio_url'),
+ 'reduce_noise': tool_parameters.get('audio_reduce_noise', False),
+ }
+ script = {k: v for k, v in script.items() if v is not None and v != ''}
+ config = {
+ 'stitch': tool_parameters.get('stitch', True),
+ 'sharpen': tool_parameters.get('sharpen'),
+ 'fluent': tool_parameters.get('fluent'),
+ 'result_format': tool_parameters.get('result_format') or 'mp4',
+ 'pad_audio': tool_parameters.get('pad_audio'),
+ 'driver_expressions': driver_expressions,
+ }
+ config = {k: v for k, v in config.items() if v is not None and v != ''}
+
+ options = {
+ 'source_url': tool_parameters['source_url'],
+ 'driver_url': tool_parameters.get('driver_url'),
+ 'script': script,
+ 'config': config,
+ }
+ options = {k: v for k, v in options.items() if v is not None and v != ''}
+
+ if not options.get('source_url'):
+ raise ValueError('Source URL is required')
+
+ if script.get('type') == 'audio':
+ script.pop('input', None)
+ if not script.get('audio_url'):
+ raise ValueError('Audio URL is required for audio script type')
+
+ if script.get('type') == 'text':
+ script.pop('audio_url', None)
+ script.pop('reduce_noise', None)
+ if not script.get('input'):
+ raise ValueError('Text input is required for text script type')
+
+ talks_result = app.talks(params=options, wait=True)
+
+ if not isinstance(talks_result, str):
+ talks_result = json.dumps(talks_result, ensure_ascii=False, indent=4)
+
+ if not talks_result:
+ return self.create_text_message('D-ID talks request failed.')
+
+ return self.create_text_message(talks_result)
diff --git a/api/core/tools/provider/builtin/did/tools/talks.yaml b/api/core/tools/provider/builtin/did/tools/talks.yaml
new file mode 100644
index 00000000000000..88d430512923e4
--- /dev/null
+++ b/api/core/tools/provider/builtin/did/tools/talks.yaml
@@ -0,0 +1,126 @@
+identity:
+ name: talks
+ author: Matri Qi
+ label:
+ en_US: Talks
+description:
+ human:
+ en_US: Talks enables the creation of realistic talking head videos from text or audio inputs.
+ llm: Talks enables the creation of realistic talking head videos from text or audio inputs.
+parameters:
+ - name: source_url
+ type: string
+ required: true
+ label:
+ en_US: source url
+ human_description:
+ en_US: The URL of the source image to be animated by the driver video, or a selection from the list of provided studio actors.
+ llm_description: The URL of the source image to be animated by the driver video, or a selection from the list of provided studio actors.
+ form: llm
+ - name: driver_url
+ type: string
+ required: false
+ label:
+ en_US: driver url
+ human_description:
+ en_US: The URL of the driver video to drive the talk, or a provided driver name from D-ID.
+ form: form
+ - name: script_type
+ type: string
+ required: false
+ label:
+ en_US: script type
+ human_description:
+ en_US: The type of the script.
+ form: form
+ options:
+ - value: text
+ label:
+ en_US: text
+ - value: audio
+ label:
+ en_US: audio
+ - name: text_input
+ type: string
+ required: false
+ label:
+ en_US: text input
+ human_description:
+ en_US: The text input to be spoken by the talking head. Required when script type is text.
+ form: form
+ - name: audio_url
+ type: string
+ required: false
+ label:
+ en_US: audio url
+ human_description:
+ en_US: The URL of the audio file to be spoken by the talking head. Required when script type is audio.
+ form: form
+ - name: audio_reduce_noise
+ type: boolean
+ required: false
+ label:
+ en_US: audio reduce noise
+ human_description:
+ en_US: If enabled, the audio will be processed to reduce noise before being spoken by the talking head. It only works when script type is audio.
+ form: form
+ - name: stitch
+ type: boolean
+ required: false
+ label:
+ en_US: stitch
+ human_description:
+ en_US: If enabled, the driver video will be stitched with the talking head video.
+ form: form
+ - name: sharpen
+ type: boolean
+ required: false
+ label:
+ en_US: sharpen
+ human_description:
+ en_US: If enabled, the talking head video will be sharpened.
+ form: form
+ - name: result_format
+ type: string
+ required: false
+ label:
+ en_US: result format
+ human_description:
+ en_US: The format of the result video.
+ form: form
+ options:
+ - value: mp4
+ label:
+ en_US: mp4
+ - value: gif
+ label:
+ en_US: gif
+ - value: mov
+ label:
+ en_US: mov
+ - name: fluent
+ type: boolean
+ required: false
+ label:
+ en_US: fluent
+ human_description:
+ en_US: Interpolate between the last & first frames of the driver video When used together with pad_audio can create a seamless transition between videos of the same driver
+ form: form
+ - name: pad_audio
+ type: number
+ required: false
+ label:
+ en_US: pad audio
+ human_description:
+ en_US: Pad the audio with silence at the end (given in seconds) Will increase the video duration & the credits it consumes
+ form: form
+ min: 1
+ max: 60
+ - name: driver_expressions
+ type: string
+ required: false
+ label:
+ en_US: driver expressions
+ human_description:
+      en_US: Timed expressions for animation. It should be a JSON array style string. Refer to the D-ID documentation (https://docs.d-id.com/reference/createtalk) for more information.
+ form: form
diff --git a/api/core/tools/provider/builtin/gitlab/_assets/gitlab.svg b/api/core/tools/provider/builtin/gitlab/_assets/gitlab.svg
new file mode 100644
index 00000000000000..07734077d5d300
--- /dev/null
+++ b/api/core/tools/provider/builtin/gitlab/_assets/gitlab.svg
@@ -0,0 +1,2 @@
+
+
\ No newline at end of file
diff --git a/api/core/tools/provider/builtin/gitlab/gitlab.py b/api/core/tools/provider/builtin/gitlab/gitlab.py
new file mode 100644
index 00000000000000..fca34ae15f9070
--- /dev/null
+++ b/api/core/tools/provider/builtin/gitlab/gitlab.py
@@ -0,0 +1,34 @@
+from typing import Any
+
+import requests
+
+from core.tools.errors import ToolProviderCredentialValidationError
+from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController
+
+
+class GitlabProvider(BuiltinToolProviderController):
+ def _validate_credentials(self, credentials: dict[str, Any]) -> None:
+ try:
+ if 'access_tokens' not in credentials or not credentials.get('access_tokens'):
+ raise ToolProviderCredentialValidationError("Gitlab Access Tokens is required.")
+
+ if 'site_url' not in credentials or not credentials.get('site_url'):
+ site_url = 'https://gitlab.com'
+ else:
+ site_url = credentials.get('site_url')
+
+ try:
+ headers = {
+ "Content-Type": "application/vnd.text+json",
+ "Authorization": f"Bearer {credentials.get('access_tokens')}",
+ }
+
+ response = requests.get(
+ url= f"{site_url}/api/v4/user",
+ headers=headers)
+ if response.status_code != 200:
+ raise ToolProviderCredentialValidationError((response.json()).get('message'))
+ except Exception as e:
+ raise ToolProviderCredentialValidationError("Gitlab Access Tokens and Api Version is invalid. {}".format(e))
+ except Exception as e:
+ raise ToolProviderCredentialValidationError(str(e))
\ No newline at end of file
diff --git a/api/core/tools/provider/builtin/gitlab/gitlab.yaml b/api/core/tools/provider/builtin/gitlab/gitlab.yaml
new file mode 100644
index 00000000000000..b5feea23823449
--- /dev/null
+++ b/api/core/tools/provider/builtin/gitlab/gitlab.yaml
@@ -0,0 +1,38 @@
+identity:
+ author: Leo.Wang
+ name: gitlab
+ label:
+ en_US: Gitlab
+ zh_Hans: Gitlab
+ description:
+ en_US: Gitlab plugin for commit
+ zh_Hans: 用于获取Gitlab commit的插件
+ icon: gitlab.svg
+credentials_for_provider:
+ access_tokens:
+ type: secret-input
+ required: true
+ label:
+ en_US: Gitlab access token
+ zh_Hans: Gitlab access token
+ placeholder:
+ en_US: Please input your Gitlab access token
+ zh_Hans: 请输入你的 Gitlab access token
+ help:
+ en_US: Get your Gitlab access token from Gitlab
+ zh_Hans: 从 Gitlab 获取您的 access token
+ url: https://docs.gitlab.com/16.9/ee/api/oauth2.html
+ site_url:
+ type: text-input
+ required: false
+ default: 'https://gitlab.com'
+ label:
+ en_US: Gitlab site url
+ zh_Hans: Gitlab site url
+ placeholder:
+ en_US: Please input your Gitlab site url
+ zh_Hans: 请输入你的 Gitlab site url
+ help:
+ en_US: Find your Gitlab url
+ zh_Hans: 找到你的Gitlab url
+ url: https://gitlab.com/help
diff --git a/api/core/tools/provider/builtin/gitlab/tools/gitlab_commits.py b/api/core/tools/provider/builtin/gitlab/tools/gitlab_commits.py
new file mode 100644
index 00000000000000..212bdb03abaaad
--- /dev/null
+++ b/api/core/tools/provider/builtin/gitlab/tools/gitlab_commits.py
@@ -0,0 +1,101 @@
+import json
+from datetime import datetime, timedelta
+from typing import Any, Union
+
+import requests
+
+from core.tools.entities.tool_entities import ToolInvokeMessage
+from core.tools.tool.builtin_tool import BuiltinTool
+
+
+class GitlabCommitsTool(BuiltinTool):
+ def _invoke(self,
+ user_id: str,
+ tool_parameters: dict[str, Any]
+ ) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
+
+ project = tool_parameters.get('project', '')
+ employee = tool_parameters.get('employee', '')
+ start_time = tool_parameters.get('start_time', '')
+ end_time = tool_parameters.get('end_time', '')
+
+ if not project:
+ return self.create_text_message('Project is required')
+
+ if not start_time:
+ start_time = (datetime.utcnow() - timedelta(days=1)).isoformat()
+ if not end_time:
+ end_time = datetime.utcnow().isoformat()
+
+ access_token = self.runtime.credentials.get('access_tokens')
+ site_url = self.runtime.credentials.get('site_url')
+
+ if 'access_tokens' not in self.runtime.credentials or not self.runtime.credentials.get('access_tokens'):
+ return self.create_text_message("Gitlab API Access Tokens is required.")
+ if 'site_url' not in self.runtime.credentials or not self.runtime.credentials.get('site_url'):
+ site_url = 'https://gitlab.com'
+
+ # Get commit content
+ result = self.fetch(user_id, site_url, access_token, project, employee, start_time, end_time)
+
+ return self.create_text_message(json.dumps(result, ensure_ascii=False))
+
+ def fetch(self,user_id: str, site_url: str, access_token: str, project: str, employee: str = None, start_time: str = '', end_time: str = '') -> list[dict[str, Any]]:
+ domain = site_url
+ headers = {"PRIVATE-TOKEN": access_token}
+ results = []
+
+ try:
+            # Get all projects
+ url = f"{domain}/api/v4/projects"
+ response = requests.get(url, headers=headers)
+ response.raise_for_status()
+ projects = response.json()
+
+ filtered_projects = [p for p in projects if project == "*" or p['name'] == project]
+
+ for project in filtered_projects:
+ project_id = project['id']
+ project_name = project['name']
+ print(f"Project: {project_name}")
+
+                # Get all of the project's commits
+ commits_url = f"{domain}/api/v4/projects/{project_id}/repository/commits"
+ params = {
+ 'since': start_time,
+ 'until': end_time
+ }
+ if employee:
+ params['author'] = employee
+
+ commits_response = requests.get(commits_url, headers=headers, params=params)
+ commits_response.raise_for_status()
+ commits = commits_response.json()
+
+ for commit in commits:
+ commit_sha = commit['id']
+ print(f"\tCommit SHA: {commit_sha}")
+
+ diff_url = f"{domain}/api/v4/projects/{project_id}/repository/commits/{commit_sha}/diff"
+ diff_response = requests.get(diff_url, headers=headers)
+ diff_response.raise_for_status()
+ diffs = diff_response.json()
+
+ for diff in diffs:
+                        # Calculate the number of changed code lines
+ added_lines = diff['diff'].count('\n+')
+ removed_lines = diff['diff'].count('\n-')
+ total_changes = added_lines + removed_lines
+
+ if total_changes > 1:
+ final_code = ''.join([line[1:] for line in diff['diff'].split('\n') if line.startswith('+') and not line.startswith('+++')])
+ results.append({
+ "project": project_name,
+ "commit_sha": commit_sha,
+ "diff": final_code
+ })
+ print(f"Commit code:{final_code}")
+ except requests.RequestException as e:
+ print(f"Error fetching data from GitLab: {e}")
+
+ return results
\ No newline at end of file
diff --git a/api/core/tools/provider/builtin/gitlab/tools/gitlab_commits.yaml b/api/core/tools/provider/builtin/gitlab/tools/gitlab_commits.yaml
new file mode 100644
index 00000000000000..fc4e7eb7bb3ed4
--- /dev/null
+++ b/api/core/tools/provider/builtin/gitlab/tools/gitlab_commits.yaml
@@ -0,0 +1,56 @@
+identity:
+ name: gitlab_commits
+ author: Leo.Wang
+ label:
+ en_US: Gitlab Commits
+ zh_Hans: Gitlab代码提交内容
+description:
+ human:
+    en_US: A tool for querying gitlab commits. Input should be an existing username.
+    zh_Hans: 一个用于查询gitlab代码提交记录的工具,输入的内容应该是一个已存在的用户名或者项目名。
+  llm: A tool for querying gitlab commits. Input should be an existing username or project.
+parameters:
+ - name: employee
+ type: string
+ required: false
+ label:
+ en_US: employee
+ zh_Hans: 员工用户名
+ human_description:
+ en_US: employee
+ zh_Hans: 员工用户名
+ llm_description: employee for gitlab
+ form: llm
+ - name: project
+ type: string
+ required: true
+ label:
+ en_US: project
+ zh_Hans: 项目名
+ human_description:
+ en_US: project
+ zh_Hans: 项目名
+ llm_description: project for gitlab
+ form: llm
+ - name: start_time
+ type: string
+ required: false
+ label:
+ en_US: start_time
+ zh_Hans: 开始时间
+ human_description:
+ en_US: start_time
+ zh_Hans: 开始时间
+ llm_description: start_time for gitlab
+ form: llm
+ - name: end_time
+ type: string
+ required: false
+ label:
+ en_US: end_time
+ zh_Hans: 结束时间
+ human_description:
+ en_US: end_time
+ zh_Hans: 结束时间
+ llm_description: end_time for gitlab
+ form: llm
diff --git a/api/core/tools/provider/builtin/json_process/tools/insert.py b/api/core/tools/provider/builtin/json_process/tools/insert.py
index 27e34f1ff3fda9..48d1bdcab48885 100644
--- a/api/core/tools/provider/builtin/json_process/tools/insert.py
+++ b/api/core/tools/provider/builtin/json_process/tools/insert.py
@@ -36,21 +36,26 @@ def _invoke(self,
# get create path
create_path = tool_parameters.get('create_path', False)
+ # get value decode.
+        # if true, it will be decoded to a dict
+ value_decode = tool_parameters.get('value_decode', False)
+
ensure_ascii = tool_parameters.get('ensure_ascii', True)
try:
- result = self._insert(content, query, new_value, ensure_ascii, index, create_path)
+ result = self._insert(content, query, new_value, ensure_ascii, value_decode, index, create_path)
return self.create_text_message(str(result))
except Exception:
return self.create_text_message('Failed to insert JSON content')
- def _insert(self, origin_json, query, new_value, ensure_ascii: bool, index=None, create_path=False):
+ def _insert(self, origin_json, query, new_value, ensure_ascii: bool, value_decode: bool, index=None, create_path=False):
try:
input_data = json.loads(origin_json)
expr = parse(query)
- try:
- new_value = json.loads(new_value)
- except json.JSONDecodeError:
- new_value = new_value
+ if value_decode is True:
+ try:
+ new_value = json.loads(new_value)
+ except json.JSONDecodeError:
+ return "Cannot decode new value to json object"
matches = expr.find(input_data)
diff --git a/api/core/tools/provider/builtin/json_process/tools/insert.yaml b/api/core/tools/provider/builtin/json_process/tools/insert.yaml
index 63e7816455cb68..21b51312dab6b3 100644
--- a/api/core/tools/provider/builtin/json_process/tools/insert.yaml
+++ b/api/core/tools/provider/builtin/json_process/tools/insert.yaml
@@ -47,10 +47,22 @@ parameters:
pt_BR: New Value
human_description:
en_US: New Value
- zh_Hans: 新值
+ zh_Hans: 插入的新值
pt_BR: New Value
llm_description: New Value to insert
form: llm
+ - name: value_decode
+ type: boolean
+ default: false
+ label:
+ en_US: Decode Value
+ zh_Hans: 解码值
+ pt_BR: Decode Value
+ human_description:
+ en_US: Whether to decode the value to a JSON object
+ zh_Hans: 是否将值解码为 JSON 对象
+ pt_BR: Whether to decode the value to a JSON object
+ form: form
- name: create_path
type: select
required: true
diff --git a/api/core/tools/provider/builtin/json_process/tools/replace.py b/api/core/tools/provider/builtin/json_process/tools/replace.py
index be696bce0e0a2c..b19198aa938942 100644
--- a/api/core/tools/provider/builtin/json_process/tools/replace.py
+++ b/api/core/tools/provider/builtin/json_process/tools/replace.py
@@ -35,6 +35,10 @@ def _invoke(self,
if not replace_model:
return self.create_text_message('Invalid parameter replace_model')
+ # get value decode.
+        # if true, it will be decoded to a dict
+ value_decode = tool_parameters.get('value_decode', False)
+
ensure_ascii = tool_parameters.get('ensure_ascii', True)
try:
if replace_model == 'pattern':
@@ -42,17 +46,17 @@ def _invoke(self,
replace_pattern = tool_parameters.get('replace_pattern', '')
if not replace_pattern:
return self.create_text_message('Invalid parameter replace_pattern')
- result = self._replace_pattern(content, query, replace_pattern, replace_value, ensure_ascii)
+ result = self._replace_pattern(content, query, replace_pattern, replace_value, ensure_ascii, value_decode)
elif replace_model == 'key':
result = self._replace_key(content, query, replace_value, ensure_ascii)
elif replace_model == 'value':
- result = self._replace_value(content, query, replace_value, ensure_ascii)
+ result = self._replace_value(content, query, replace_value, ensure_ascii, value_decode)
return self.create_text_message(str(result))
except Exception:
return self.create_text_message('Failed to replace JSON content')
# Replace pattern
- def _replace_pattern(self, content: str, query: str, replace_pattern: str, replace_value: str, ensure_ascii: bool) -> str:
+ def _replace_pattern(self, content: str, query: str, replace_pattern: str, replace_value: str, ensure_ascii: bool, value_decode: bool) -> str:
try:
input_data = json.loads(content)
expr = parse(query)
@@ -61,6 +65,12 @@ def _replace_pattern(self, content: str, query: str, replace_pattern: str, repla
for match in matches:
new_value = match.value.replace(replace_pattern, replace_value)
+ if value_decode is True:
+ try:
+ new_value = json.loads(new_value)
+ except json.JSONDecodeError:
+ return "Cannot decode replace value to json object"
+
match.full_path.update(input_data, new_value)
return json.dumps(input_data, ensure_ascii=ensure_ascii)
@@ -92,10 +102,15 @@ def _replace_key(self, content: str, query: str, replace_value: str, ensure_asci
return str(e)
# Replace value
- def _replace_value(self, content: str, query: str, replace_value: str, ensure_ascii: bool) -> str:
+ def _replace_value(self, content: str, query: str, replace_value: str, ensure_ascii: bool, value_decode: bool) -> str:
try:
input_data = json.loads(content)
expr = parse(query)
+ if value_decode is True:
+ try:
+ replace_value = json.loads(replace_value)
+ except json.JSONDecodeError:
+ return "Cannot decode replace value to json object"
matches = expr.find(input_data)
diff --git a/api/core/tools/provider/builtin/json_process/tools/replace.yaml b/api/core/tools/provider/builtin/json_process/tools/replace.yaml
index cf4b1dc63f45b8..ae238b1fbcd05e 100644
--- a/api/core/tools/provider/builtin/json_process/tools/replace.yaml
+++ b/api/core/tools/provider/builtin/json_process/tools/replace.yaml
@@ -60,10 +60,22 @@ parameters:
pt_BR: Replace Value
human_description:
en_US: New Value
- zh_Hans: New Value
+ zh_Hans: 新值
pt_BR: New Value
llm_description: New Value to replace
form: llm
+ - name: value_decode
+ type: boolean
+ default: false
+ label:
+ en_US: Decode Value
+ zh_Hans: 解码值
+ pt_BR: Decode Value
+ human_description:
+ en_US: Whether to decode the value to a JSON object (Does not apply to replace key)
+ zh_Hans: 是否将值解码为 JSON 对象 (不适用于键替换)
+ pt_BR: Whether to decode the value to a JSON object (Does not apply to replace key)
+ form: form
- name: replace_model
type: select
required: true
diff --git a/api/core/tools/provider/builtin/regex/_assets/icon.svg b/api/core/tools/provider/builtin/regex/_assets/icon.svg
new file mode 100644
index 00000000000000..0231a2b4aa9da2
--- /dev/null
+++ b/api/core/tools/provider/builtin/regex/_assets/icon.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/api/core/tools/provider/builtin/regex/regex.py b/api/core/tools/provider/builtin/regex/regex.py
new file mode 100644
index 00000000000000..d38ae1b292675f
--- /dev/null
+++ b/api/core/tools/provider/builtin/regex/regex.py
@@ -0,0 +1,19 @@
+from typing import Any
+
+from core.tools.errors import ToolProviderCredentialValidationError
+from core.tools.provider.builtin.regex.tools.regex_extract import RegexExpressionTool
+from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController
+
+
+class RegexProvider(BuiltinToolProviderController):
+ def _validate_credentials(self, credentials: dict[str, Any]) -> None:
+ try:
+ RegexExpressionTool().invoke(
+ user_id='',
+ tool_parameters={
+ 'content': '1+(2+3)*4',
+ 'expression': r'(\d+)',
+ },
+ )
+ except Exception as e:
+ raise ToolProviderCredentialValidationError(str(e))
diff --git a/api/core/tools/provider/builtin/regex/regex.yaml b/api/core/tools/provider/builtin/regex/regex.yaml
new file mode 100644
index 00000000000000..d05776f214e8d2
--- /dev/null
+++ b/api/core/tools/provider/builtin/regex/regex.yaml
@@ -0,0 +1,15 @@
+identity:
+ author: zhuhao
+ name: regex
+ label:
+ en_US: Regex
+ zh_Hans: 正则表达式提取
+ pt_BR: Regex
+ description:
+ en_US: A tool for regex extraction.
+ zh_Hans: 一个用于正则表达式内容提取的工具。
+ pt_BR: A tool for regex extraction.
+ icon: icon.svg
+ tags:
+ - utilities
+ - productivity
diff --git a/api/core/tools/provider/builtin/regex/tools/regex_extract.py b/api/core/tools/provider/builtin/regex/tools/regex_extract.py
new file mode 100644
index 00000000000000..5d8f013d0d012c
--- /dev/null
+++ b/api/core/tools/provider/builtin/regex/tools/regex_extract.py
@@ -0,0 +1,27 @@
+import re
+from typing import Any, Union
+
+from core.tools.entities.tool_entities import ToolInvokeMessage
+from core.tools.tool.builtin_tool import BuiltinTool
+
+
+class RegexExpressionTool(BuiltinTool):  # builtin tool: extract regex matches from a block of text
+    def _invoke(self,
+                user_id: str,
+                tool_parameters: dict[str, Any],
+                ) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
+        """
+        Find all matches of `expression` in `content` and return them as a text message.
+        """
+        # required inputs: 'content' (text to search) and 'expression' (regex pattern)
+        content = tool_parameters.get('content', '').strip()  # NOTE(review): assumes 'content' is a str when present — .strip() raises otherwise
+        if not content:
+            return self.create_text_message('Invalid content')
+        expression = tool_parameters.get('expression', '').strip()
+        if not expression:
+            return self.create_text_message('Invalid expression')
+        try:
+            result = re.findall(expression, content)  # all non-overlapping matches; yields group tuples if the pattern has groups
+            return self.create_text_message(str(result))
+        except Exception as e:
+            return self.create_text_message(f'Failed to extract result, error: {str(e)}')
\ No newline at end of file
diff --git a/api/core/tools/provider/builtin/regex/tools/regex_extract.yaml b/api/core/tools/provider/builtin/regex/tools/regex_extract.yaml
new file mode 100644
index 00000000000000..de4100def176c9
--- /dev/null
+++ b/api/core/tools/provider/builtin/regex/tools/regex_extract.yaml
@@ -0,0 +1,38 @@
+identity:
+ name: regex_extract
+ author: zhuhao
+ label:
+ en_US: Regex Extract
+ zh_Hans: 正则表达式内容提取
+ pt_BR: Regex Extract
+description:
+ human:
+ en_US: A tool for extracting matching content using regular expressions.
+ zh_Hans: 一个用于利用正则表达式提取匹配内容结果的工具。
+ pt_BR: A tool for extracting matching content using regular expressions.
+ llm: A tool for extracting matching content using regular expressions.
+parameters:
+ - name: content
+ type: string
+ required: true
+ label:
+ en_US: Content to be extracted
+ zh_Hans: 内容
+ pt_BR: Content to be extracted
+ human_description:
+ en_US: Content to be extracted
+ zh_Hans: 内容
+ pt_BR: Content to be extracted
+ form: llm
+ - name: expression
+ type: string
+ required: true
+ label:
+ en_US: Regular expression
+ zh_Hans: 正则表达式
+ pt_BR: Regular expression
+ human_description:
+ en_US: Regular expression
+ zh_Hans: 正则表达式
+ pt_BR: Regular expression
+ form: llm
diff --git a/api/core/tools/provider/builtin/searxng/docker/settings.yml b/api/core/tools/provider/builtin/searxng/docker/settings.yml
new file mode 100644
index 00000000000000..18e18688002cbc
--- /dev/null
+++ b/api/core/tools/provider/builtin/searxng/docker/settings.yml
@@ -0,0 +1,2501 @@
+general:
+ # Debug mode, only for development. Is overwritten by ${SEARXNG_DEBUG}
+ debug: false
+ # displayed name
+ instance_name: "searxng"
+ # For example: https://example.com/privacy
+ privacypolicy_url: false
+ # use true to use your own donation page written in searx/info/en/donate.md
+ # use false to disable the donation link
+ donation_url: false
+ # mailto:contact@example.com
+ contact_url: false
+ # record stats
+ enable_metrics: true
+
+brand:
+ new_issue_url: https://github.com/searxng/searxng/issues/new
+ docs_url: https://docs.searxng.org/
+ public_instances: https://searx.space
+ wiki_url: https://github.com/searxng/searxng/wiki
+ issue_url: https://github.com/searxng/searxng/issues
+ # custom:
+ # maintainer: "Jon Doe"
+ # # Custom entries in the footer: [title]: [link]
+ # links:
+ # Uptime: https://uptime.searxng.org/history/darmarit-org
+ # About: "https://searxng.org"
+
+search:
+ # Filter results. 0: None, 1: Moderate, 2: Strict
+ safe_search: 0
+ # Existing autocomplete backends: "dbpedia", "duckduckgo", "google", "yandex", "mwmbl",
+ # "seznam", "startpage", "stract", "swisscows", "qwant", "wikipedia" - leave blank to turn it off
+ # by default.
+ autocomplete: ""
+  # minimum characters to type before autocompleter starts
+ autocomplete_min: 4
+ # Default search language - leave blank to detect from browser information or
+ # use codes from 'languages.py'
+ default_lang: "auto"
+ # max_page: 0 # if engine supports paging, 0 means unlimited numbers of pages
+ # Available languages
+ # languages:
+ # - all
+ # - en
+ # - en-US
+ # - de
+ # - it-IT
+ # - fr
+ # - fr-BE
+ # ban time in seconds after engine errors
+ ban_time_on_fail: 5
+ # max ban time in seconds after engine errors
+ max_ban_time_on_fail: 120
+ suspended_times:
+ # Engine suspension time after error (in seconds; set to 0 to disable)
+ # For error "Access denied" and "HTTP error [402, 403]"
+ SearxEngineAccessDenied: 86400
+ # For error "CAPTCHA"
+ SearxEngineCaptcha: 86400
+ # For error "Too many request" and "HTTP error 429"
+ SearxEngineTooManyRequests: 3600
+ # Cloudflare CAPTCHA
+ cf_SearxEngineCaptcha: 1296000
+ cf_SearxEngineAccessDenied: 86400
+ # ReCAPTCHA
+ recaptcha_SearxEngineCaptcha: 604800
+
+ # remove format to deny access, use lower case.
+ # formats: [html, csv, json, rss]
+ formats:
+ - html
+ - json
+
+server:
+ # Is overwritten by ${SEARXNG_PORT} and ${SEARXNG_BIND_ADDRESS}
+ port: 8888
+ bind_address: "127.0.0.1"
+ # public URL of the instance, to ensure correct inbound links. Is overwritten
+ # by ${SEARXNG_URL}.
+ base_url: http://0.0.0.0:8081/ # "http://example.com/location"
+ # rate limit the number of request on the instance, block some bots.
+ # Is overwritten by ${SEARXNG_LIMITER}
+ limiter: false
+ # enable features designed only for public instances.
+ # Is overwritten by ${SEARXNG_PUBLIC_INSTANCE}
+ public_instance: false
+
+ # If your instance owns a /etc/searxng/settings.yml file, then set the following
+ # values there.
+
+ secret_key: "772ba36386fb56d0f8fe818941552dabbe69220d4c0eb4a385a5729cdbc20c2d" # Is overwritten by ${SEARXNG_SECRET}
+ # Proxy image results through SearXNG. Is overwritten by ${SEARXNG_IMAGE_PROXY}
+ image_proxy: false
+ # 1.0 and 1.1 are supported
+ http_protocol_version: "1.0"
+ # POST queries are more secure as they don't show up in history but may cause
+ # problems when using Firefox containers
+ method: "POST"
+ default_http_headers:
+ X-Content-Type-Options: nosniff
+ X-Download-Options: noopen
+ X-Robots-Tag: noindex, nofollow
+ Referrer-Policy: no-referrer
+
+redis:
+ # URL to connect redis database. Is overwritten by ${SEARXNG_REDIS_URL}.
+ # https://docs.searxng.org/admin/settings/settings_redis.html#settings-redis
+ url: false
+
+ui:
+ # Custom static path - leave it blank if you didn't change
+ static_path: ""
+ # Is overwritten by ${SEARXNG_STATIC_USE_HASH}.
+ static_use_hash: false
+ # Custom templates path - leave it blank if you didn't change
+ templates_path: ""
+ # query_in_title: When true, the result page's titles contains the query
+  # it decreases privacy, since the browser can record the page titles.
+ query_in_title: false
+ # infinite_scroll: When true, automatically loads the next page when scrolling to bottom of the current page.
+ infinite_scroll: false
+ # ui theme
+ default_theme: simple
+ # center the results ?
+ center_alignment: false
+ # URL prefix of the internet archive, don't forget trailing slash (if needed).
+ # cache_url: "https://webcache.googleusercontent.com/search?q=cache:"
+ # Default interface locale - leave blank to detect from browser information or
+ # use codes from the 'locales' config section
+ default_locale: ""
+ # Open result links in a new tab by default
+ # results_on_new_tab: false
+ theme_args:
+ # style of simple theme: auto, light, dark
+ simple_style: auto
+ # Perform search immediately if a category selected.
+ # Disable to select multiple categories at once and start the search manually.
+ search_on_category_select: true
+ # Hotkeys: default or vim
+ hotkeys: default
+
+# Lock arbitrary settings on the preferences page. To find the ID of the user
+# setting you want to lock, check the ID of the form on the page "preferences".
+#
+# preferences:
+# lock:
+# - language
+# - autocomplete
+# - method
+# - query_in_title
+
+# searx supports result proxification using an external service:
+# https://github.com/asciimoo/morty uncomment below section if you have running
+# morty proxy the key is base64 encoded (keep the !!binary notation)
+# Note: since commit af77ec3, morty accepts a base64 encoded key.
+#
+# result_proxy:
+# url: http://127.0.0.1:3000/
+# # the key is a base64 encoded string, the YAML !!binary prefix is optional
+# key: !!binary "your_morty_proxy_key"
+# # [true|false] enable the "proxy" button next to each result
+# proxify_results: true
+
+# communication with search engines
+#
+outgoing:
+  # default timeout in seconds, can be overridden per engine
+ request_timeout: 3.0
+ # the maximum timeout in seconds
+ # max_request_timeout: 10.0
+ # suffix of searx_useragent, could contain information like an email address
+ # to the administrator
+ useragent_suffix: ""
+ # The maximum number of concurrent connections that may be established.
+ pool_connections: 100
+ # Allow the connection pool to maintain keep-alive connections below this
+ # point.
+ pool_maxsize: 20
+ # See https://www.python-httpx.org/http2/
+ enable_http2: true
+ # uncomment below section if you want to use a custom server certificate
+ # see https://www.python-httpx.org/advanced/#changing-the-verification-defaults
+ # and https://www.python-httpx.org/compatibility/#ssl-configuration
+ # verify: ~/.mitmproxy/mitmproxy-ca-cert.cer
+ #
+ # uncomment below section if you want to use a proxyq see: SOCKS proxies
+ # https://2.python-requests.org/en/latest/user/advanced/#proxies
+ # are also supported: see
+ # https://2.python-requests.org/en/latest/user/advanced/#socks
+ #
+ # proxies:
+ # all://:
+ # - http://host.docker.internal:1080
+ #
+ # using_tor_proxy: true
+ #
+ # Extra seconds to add in order to account for the time taken by the proxy
+ #
+ # extra_proxy_timeout: 10
+ #
+ # uncomment below section only if you have more than one network interface
+ # which can be the source of outgoing search requests
+ #
+ # source_ips:
+ # - 1.1.1.1
+ # - 1.1.1.2
+ # - fe80::/126
+
+# External plugin configuration, for more details see
+# https://docs.searxng.org/dev/plugins.html
+#
+# plugins:
+# - plugin1
+# - plugin2
+# - ...
+
+# Comment or un-comment plugin to activate / deactivate by default.
+#
+# enabled_plugins:
+# # these plugins are enabled if nothing is configured ..
+# - 'Hash plugin'
+# - 'Self Information'
+# - 'Tracker URL remover'
+# - 'Ahmia blacklist' # activation depends on outgoing.using_tor_proxy
+# # these plugins are disabled if nothing is configured ..
+# - 'Hostnames plugin' # see 'hostnames' configuration below
+# - 'Basic Calculator'
+# - 'Open Access DOI rewrite'
+# - 'Tor check plugin'
+# # Read the docs before activate: auto-detection of the language could be
+# # detrimental to users expectations / users can activate the plugin in the
+# # preferences if they want.
+# - 'Autodetect search language'
+
+# Configuration of the "Hostnames plugin":
+#
+# hostnames:
+# replace:
+# '(.*\.)?youtube\.com$': 'invidious.example.com'
+# '(.*\.)?youtu\.be$': 'invidious.example.com'
+# '(.*\.)?reddit\.com$': 'teddit.example.com'
+# '(.*\.)?redd\.it$': 'teddit.example.com'
+# '(www\.)?twitter\.com$': 'nitter.example.com'
+# remove:
+# - '(.*\.)?facebook.com$'
+# low_priority:
+# - '(.*\.)?google(\..*)?$'
+# high_priority:
+# - '(.*\.)?wikipedia.org$'
+#
+# Alternatively you can use external files for configuring the "Hostnames plugin":
+#
+# hostnames:
+# replace: 'rewrite-hosts.yml'
+#
+# Content of 'rewrite-hosts.yml' (place the file in the same directory as 'settings.yml'):
+# '(.*\.)?youtube\.com$': 'invidious.example.com'
+# '(.*\.)?youtu\.be$': 'invidious.example.com'
+#
+
+checker:
+ # disable checker when in debug mode
+ off_when_debug: true
+
+ # use "scheduling: false" to disable scheduling
+ # scheduling: interval or int
+
+ # to activate the scheduler:
+ # * uncomment "scheduling" section
+ # * add "cache2 = name=searxngcache,items=2000,blocks=2000,blocksize=4096,bitmap=1"
+ # to your uwsgi.ini
+
+ # scheduling:
+ # start_after: [300, 1800] # delay to start the first run of the checker
+ # every: [86400, 90000] # how often the checker runs
+
+ # additional tests: only for the YAML anchors (see the engines section)
+ #
+ additional_tests:
+ rosebud: &test_rosebud
+ matrix:
+ query: rosebud
+ lang: en
+ result_container:
+ - not_empty
+ - ['one_title_contains', 'citizen kane']
+ test:
+ - unique_results
+
+ android: &test_android
+ matrix:
+ query: ['android']
+ lang: ['en', 'de', 'fr', 'zh-CN']
+ result_container:
+ - not_empty
+ - ['one_title_contains', 'google']
+ test:
+ - unique_results
+
+ # tests: only for the YAML anchors (see the engines section)
+ tests:
+ infobox: &tests_infobox
+ infobox:
+ matrix:
+ query: ["linux", "new york", "bbc"]
+ result_container:
+ - has_infobox
+
+categories_as_tabs:
+ general:
+ images:
+ videos:
+ news:
+ map:
+ music:
+ it:
+ science:
+ files:
+ social media:
+
+engines:
+ - name: 9gag
+ engine: 9gag
+ shortcut: 9g
+ disabled: true
+
+ - name: alpine linux packages
+ engine: alpinelinux
+ disabled: true
+ shortcut: alp
+
+ - name: annas archive
+ engine: annas_archive
+ disabled: true
+ shortcut: aa
+
+ # - name: annas articles
+ # engine: annas_archive
+ # shortcut: aaa
+ # # https://docs.searxng.org/dev/engines/online/annas_archive.html
+ # aa_content: 'magazine' # book_fiction, book_unknown, book_nonfiction, book_comic
+ # aa_ext: 'pdf' # pdf, epub, ..
+  #   aa_sort: 'oldest' # newest, oldest, largest, smallest
+
+ - name: apk mirror
+ engine: apkmirror
+ timeout: 4.0
+ shortcut: apkm
+ disabled: true
+
+ - name: apple app store
+ engine: apple_app_store
+ shortcut: aps
+ disabled: true
+
+ # Requires Tor
+ - name: ahmia
+ engine: ahmia
+ categories: onions
+ enable_http: true
+ shortcut: ah
+
+ - name: anaconda
+ engine: xpath
+ paging: true
+ first_page_num: 0
+ search_url: https://anaconda.org/search?q={query}&page={pageno}
+ results_xpath: //tbody/tr
+ url_xpath: ./td/h5/a[last()]/@href
+ title_xpath: ./td/h5
+ content_xpath: ./td[h5]/text()
+ categories: it
+ timeout: 6.0
+ shortcut: conda
+ disabled: true
+
+ - name: arch linux wiki
+ engine: archlinux
+ shortcut: al
+
+ - name: artic
+ engine: artic
+ shortcut: arc
+ timeout: 4.0
+
+ - name: arxiv
+ engine: arxiv
+ shortcut: arx
+ timeout: 4.0
+
+ - name: ask
+ engine: ask
+ shortcut: ask
+ disabled: true
+
+ # tmp suspended: dh key too small
+ # - name: base
+ # engine: base
+ # shortcut: bs
+
+ - name: bandcamp
+ engine: bandcamp
+ shortcut: bc
+ categories: music
+
+ - name: wikipedia
+ engine: wikipedia
+ shortcut: wp
+ # add "list" to the array to get results in the results list
+ display_type: ["infobox"]
+ base_url: 'https://{language}.wikipedia.org/'
+ categories: [general]
+
+ - name: bilibili
+ engine: bilibili
+ shortcut: bil
+ disabled: true
+
+ - name: bing
+ engine: bing
+ shortcut: bi
+ disabled: false
+
+ - name: bing images
+ engine: bing_images
+ shortcut: bii
+
+ - name: bing news
+ engine: bing_news
+ shortcut: bin
+
+ - name: bing videos
+ engine: bing_videos
+ shortcut: biv
+
+ - name: bitbucket
+ engine: xpath
+ paging: true
+ search_url: https://bitbucket.org/repo/all/{pageno}?name={query}
+ url_xpath: //article[@class="repo-summary"]//a[@class="repo-link"]/@href
+ title_xpath: //article[@class="repo-summary"]//a[@class="repo-link"]
+ content_xpath: //article[@class="repo-summary"]/p
+ categories: [it, repos]
+ timeout: 4.0
+ disabled: true
+ shortcut: bb
+ about:
+ website: https://bitbucket.org/
+ wikidata_id: Q2493781
+ official_api_documentation: https://developer.atlassian.com/bitbucket
+ use_official_api: false
+ require_api_key: false
+ results: HTML
+
+ - name: bpb
+ engine: bpb
+ shortcut: bpb
+ disabled: true
+
+ - name: btdigg
+ engine: btdigg
+ shortcut: bt
+ disabled: true
+
+ - name: openverse
+ engine: openverse
+ categories: images
+ shortcut: opv
+
+ - name: media.ccc.de
+ engine: ccc_media
+ shortcut: c3tv
+ # We don't set language: de here because media.ccc.de is not just
+ # for a German audience. It contains many English videos and many
+ # German videos have English subtitles.
+ disabled: true
+
+ - name: chefkoch
+ engine: chefkoch
+ shortcut: chef
+ # to show premium or plus results too:
+ # skip_premium: false
+
+ # - name: core.ac.uk
+ # engine: core
+ # categories: science
+ # shortcut: cor
+ # # get your API key from: https://core.ac.uk/api-keys/register/
+ # api_key: 'unset'
+
+ - name: cppreference
+ engine: cppreference
+ shortcut: cpp
+ paging: false
+ disabled: true
+
+ - name: crossref
+ engine: crossref
+ shortcut: cr
+ timeout: 30
+ disabled: true
+
+ - name: crowdview
+ engine: json_engine
+ shortcut: cv
+ categories: general
+ paging: false
+ search_url: https://crowdview-next-js.onrender.com/api/search-v3?query={query}
+ results_query: results
+ url_query: link
+ title_query: title
+ content_query: snippet
+ disabled: true
+ about:
+ website: https://crowdview.ai/
+
+ - name: yep
+ engine: yep
+ shortcut: yep
+ categories: general
+ search_type: web
+ timeout: 5
+ disabled: true
+
+ - name: yep images
+ engine: yep
+ shortcut: yepi
+ categories: images
+ search_type: images
+ disabled: true
+
+ - name: yep news
+ engine: yep
+ shortcut: yepn
+ categories: news
+ search_type: news
+ disabled: true
+
+ - name: curlie
+ engine: xpath
+ shortcut: cl
+ categories: general
+ disabled: true
+ paging: true
+ lang_all: ''
+ search_url: https://curlie.org/search?q={query}&lang={lang}&start={pageno}&stime=92452189
+ page_size: 20
+ results_xpath: //div[@id="site-list-content"]/div[@class="site-item"]
+ url_xpath: ./div[@class="title-and-desc"]/a/@href
+ title_xpath: ./div[@class="title-and-desc"]/a/div
+ content_xpath: ./div[@class="title-and-desc"]/div[@class="site-descr"]
+ about:
+ website: https://curlie.org/
+ wikidata_id: Q60715723
+ use_official_api: false
+ require_api_key: false
+ results: HTML
+
+ - name: currency
+ engine: currency_convert
+ categories: general
+ shortcut: cc
+
+ - name: bahnhof
+ engine: json_engine
+ search_url: https://www.bahnhof.de/api/stations/search/{query}
+ url_prefix: https://www.bahnhof.de/
+ url_query: slug
+ title_query: name
+ content_query: state
+ shortcut: bf
+ disabled: true
+ about:
+ website: https://www.bahn.de
+ wikidata_id: Q22811603
+ use_official_api: false
+ require_api_key: false
+ results: JSON
+ language: de
+ tests:
+ bahnhof:
+ matrix:
+ query: berlin
+ lang: en
+ result_container:
+ - not_empty
+ - ['one_title_contains', 'Berlin Hauptbahnhof']
+ test:
+ - unique_results
+
+ - name: deezer
+ engine: deezer
+ shortcut: dz
+ disabled: true
+
+ - name: destatis
+ engine: destatis
+ shortcut: destat
+ disabled: true
+
+ - name: deviantart
+ engine: deviantart
+ shortcut: da
+ timeout: 3.0
+
+ - name: ddg definitions
+ engine: duckduckgo_definitions
+ shortcut: ddd
+ weight: 2
+ disabled: true
+ tests: *tests_infobox
+
+ # cloudflare protected
+ # - name: digbt
+ # engine: digbt
+ # shortcut: dbt
+ # timeout: 6.0
+ # disabled: true
+
+ - name: docker hub
+ engine: docker_hub
+ shortcut: dh
+ categories: [it, packages]
+
+ - name: encyclosearch
+ engine: json_engine
+ shortcut: es
+ categories: general
+ paging: true
+ search_url: https://encyclosearch.org/encyclosphere/search?q={query}&page={pageno}&resultsPerPage=15
+ results_query: Results
+ url_query: SourceURL
+ title_query: Title
+ content_query: Description
+ disabled: true
+ about:
+ website: https://encyclosearch.org
+ official_api_documentation: https://encyclosearch.org/docs/#/rest-api
+ use_official_api: true
+ require_api_key: false
+ results: JSON
+
+ - name: erowid
+ engine: xpath
+ paging: true
+ first_page_num: 0
+ page_size: 30
+ search_url: https://www.erowid.org/search.php?q={query}&s={pageno}
+ url_xpath: //dl[@class="results-list"]/dt[@class="result-title"]/a/@href
+ title_xpath: //dl[@class="results-list"]/dt[@class="result-title"]/a/text()
+ content_xpath: //dl[@class="results-list"]/dd[@class="result-details"]
+ categories: []
+ shortcut: ew
+ disabled: true
+ about:
+ website: https://www.erowid.org/
+ wikidata_id: Q1430691
+ official_api_documentation:
+ use_official_api: false
+ require_api_key: false
+ results: HTML
+
+ # - name: elasticsearch
+ # shortcut: es
+ # engine: elasticsearch
+ # base_url: http://localhost:9200
+ # username: elastic
+ # password: changeme
+ # index: my-index
+ # # available options: match, simple_query_string, term, terms, custom
+ # query_type: match
+ # # if query_type is set to custom, provide your query here
+ # #custom_query_json: {"query":{"match_all": {}}}
+ # #show_metadata: false
+ # disabled: true
+
+ - name: wikidata
+ engine: wikidata
+ shortcut: wd
+ timeout: 3.0
+ weight: 2
+ # add "list" to the array to get results in the results list
+ display_type: ["infobox"]
+ tests: *tests_infobox
+ categories: [general]
+
+ - name: duckduckgo
+ engine: duckduckgo
+ shortcut: ddg
+
+ - name: duckduckgo images
+ engine: duckduckgo_extra
+ categories: [images, web]
+ ddg_category: images
+ shortcut: ddi
+ disabled: true
+
+ - name: duckduckgo videos
+ engine: duckduckgo_extra
+ categories: [videos, web]
+ ddg_category: videos
+ shortcut: ddv
+ disabled: true
+
+ - name: duckduckgo news
+ engine: duckduckgo_extra
+ categories: [news, web]
+ ddg_category: news
+ shortcut: ddn
+ disabled: true
+
+ - name: duckduckgo weather
+ engine: duckduckgo_weather
+ shortcut: ddw
+ disabled: true
+
+ - name: apple maps
+ engine: apple_maps
+ shortcut: apm
+ disabled: true
+ timeout: 5.0
+
+ - name: emojipedia
+ engine: emojipedia
+ timeout: 4.0
+ shortcut: em
+ disabled: true
+
+ - name: tineye
+ engine: tineye
+ shortcut: tin
+ timeout: 9.0
+ disabled: true
+
+ - name: etymonline
+ engine: xpath
+ paging: true
+ search_url: https://etymonline.com/search?page={pageno}&q={query}
+ url_xpath: //a[contains(@class, "word__name--")]/@href
+ title_xpath: //a[contains(@class, "word__name--")]
+ content_xpath: //section[contains(@class, "word__defination")]
+ first_page_num: 1
+ shortcut: et
+ categories: [dictionaries]
+ about:
+ website: https://www.etymonline.com/
+ wikidata_id: Q1188617
+ official_api_documentation:
+ use_official_api: false
+ require_api_key: false
+ results: HTML
+
+ # - name: ebay
+ # engine: ebay
+ # shortcut: eb
+ # base_url: 'https://www.ebay.com'
+ # disabled: true
+ # timeout: 5
+
+ - name: 1x
+ engine: www1x
+ shortcut: 1x
+ timeout: 3.0
+ disabled: true
+
+ - name: fdroid
+ engine: fdroid
+ shortcut: fd
+ disabled: true
+
+ - name: findthatmeme
+ engine: findthatmeme
+ shortcut: ftm
+ disabled: true
+
+ - name: flickr
+ categories: images
+ shortcut: fl
+ # You can use the engine using the official stable API, but you need an API
+ # key, see: https://www.flickr.com/services/apps/create/
+ # engine: flickr
+ # api_key: 'apikey' # required!
+ # Or you can use the html non-stable engine, activated by default
+ engine: flickr_noapi
+
+ - name: free software directory
+ engine: mediawiki
+ shortcut: fsd
+ categories: [it, software wikis]
+ base_url: https://directory.fsf.org/
+ search_type: title
+ timeout: 5.0
+ disabled: true
+ about:
+ website: https://directory.fsf.org/
+ wikidata_id: Q2470288
+
+ # - name: freesound
+ # engine: freesound
+ # shortcut: fnd
+ # disabled: true
+ # timeout: 15.0
+ # API key required, see: https://freesound.org/docs/api/overview.html
+ # api_key: MyAPIkey
+
+ - name: frinkiac
+ engine: frinkiac
+ shortcut: frk
+ disabled: true
+
+ - name: fyyd
+ engine: fyyd
+ shortcut: fy
+ timeout: 8.0
+ disabled: true
+
+ - name: geizhals
+ engine: geizhals
+ shortcut: geiz
+ disabled: true
+
+ - name: genius
+ engine: genius
+ shortcut: gen
+
+ - name: gentoo
+ engine: mediawiki
+ shortcut: ge
+ categories: ["it", "software wikis"]
+ base_url: "https://wiki.gentoo.org/"
+ api_path: "api.php"
+ search_type: text
+ timeout: 10
+
+ - name: gitlab
+ engine: json_engine
+ paging: true
+ search_url: https://gitlab.com/api/v4/projects?search={query}&page={pageno}
+ url_query: web_url
+ title_query: name_with_namespace
+ content_query: description
+ page_size: 20
+ categories: [it, repos]
+ shortcut: gl
+ timeout: 10.0
+ disabled: true
+ about:
+ website: https://about.gitlab.com/
+ wikidata_id: Q16639197
+ official_api_documentation: https://docs.gitlab.com/ee/api/
+ use_official_api: false
+ require_api_key: false
+ results: JSON
+
+ - name: github
+ engine: github
+ shortcut: gh
+
+ - name: codeberg
+ # https://docs.searxng.org/dev/engines/online/gitea.html
+ engine: gitea
+ base_url: https://codeberg.org
+ shortcut: cb
+ disabled: true
+
+ - name: gitea.com
+ engine: gitea
+ base_url: https://gitea.com
+ shortcut: gitea
+ disabled: true
+
+ - name: goodreads
+ engine: goodreads
+ shortcut: good
+ timeout: 4.0
+ disabled: true
+
+ - name: google
+ engine: google
+ shortcut: go
+ # additional_tests:
+ # android: *test_android
+
+ - name: google images
+ engine: google_images
+ shortcut: goi
+ # additional_tests:
+ # android: *test_android
+ # dali:
+ # matrix:
+ # query: ['Dali Christ']
+ # lang: ['en', 'de', 'fr', 'zh-CN']
+ # result_container:
+ # - ['one_title_contains', 'Salvador']
+
+ - name: google news
+ engine: google_news
+ shortcut: gon
+ # additional_tests:
+ # android: *test_android
+
+ - name: google videos
+ engine: google_videos
+ shortcut: gov
+ # additional_tests:
+ # android: *test_android
+
+ - name: google scholar
+ engine: google_scholar
+ shortcut: gos
+
+ - name: google play apps
+ engine: google_play
+ categories: [files, apps]
+ shortcut: gpa
+ play_categ: apps
+ disabled: true
+
+ - name: google play movies
+ engine: google_play
+ categories: videos
+ shortcut: gpm
+ play_categ: movies
+ disabled: true
+
+ - name: material icons
+ engine: material_icons
+ categories: images
+ shortcut: mi
+ disabled: true
+
+ - name: gpodder
+ engine: json_engine
+ shortcut: gpod
+ timeout: 4.0
+ paging: false
+ search_url: https://gpodder.net/search.json?q={query}
+ url_query: url
+ title_query: title
+ content_query: description
+ page_size: 19
+ categories: music
+ disabled: true
+ about:
+ website: https://gpodder.net
+ wikidata_id: Q3093354
+ official_api_documentation: https://gpoddernet.readthedocs.io/en/latest/api/
+ use_official_api: false
+ requires_api_key: false
+ results: JSON
+
+ - name: habrahabr
+ engine: xpath
+ paging: true
+ search_url: https://habr.com/en/search/page{pageno}/?q={query}
+ results_xpath: //article[contains(@class, "tm-articles-list__item")]
+ url_xpath: .//a[@class="tm-title__link"]/@href
+ title_xpath: .//a[@class="tm-title__link"]
+ content_xpath: .//div[contains(@class, "article-formatted-body")]
+ categories: it
+ timeout: 4.0
+ disabled: true
+ shortcut: habr
+ about:
+ website: https://habr.com/
+ wikidata_id: Q4494434
+ official_api_documentation: https://habr.com/en/docs/help/api/
+ use_official_api: false
+ require_api_key: false
+ results: HTML
+
+ - name: hackernews
+ engine: hackernews
+ shortcut: hn
+ disabled: true
+
+ - name: hex
+ engine: hex
+ shortcut: hex
+ disabled: true
+ # Valid values: name inserted_at updated_at total_downloads recent_downloads
+ sort_criteria: "recent_downloads"
+ page_size: 10
+
+ - name: crates.io
+ engine: crates
+ shortcut: crates
+ disabled: true
+ timeout: 6.0
+
+ - name: hoogle
+ engine: xpath
+ search_url: https://hoogle.haskell.org/?hoogle={query}
+ results_xpath: '//div[@class="result"]'
+ title_xpath: './/div[@class="ans"]//a'
+ url_xpath: './/div[@class="ans"]//a/@href'
+ content_xpath: './/div[@class="from"]'
+ page_size: 20
+ categories: [it, packages]
+ shortcut: ho
+ about:
+ website: https://hoogle.haskell.org/
+ wikidata_id: Q34010
+ official_api_documentation: https://hackage.haskell.org/api
+ use_official_api: false
+ require_api_key: false
+ results: JSON
+
+ - name: imdb
+ engine: imdb
+ shortcut: imdb
+ timeout: 6.0
+ disabled: true
+
+ - name: imgur
+ engine: imgur
+ shortcut: img
+ disabled: true
+
+ - name: ina
+ engine: ina
+ shortcut: in
+ timeout: 6.0
+ disabled: true
+
+ - name: invidious
+ engine: invidious
+    # Instances will be selected randomly, see https://api.invidious.io/ for
+ # instances that are stable (good uptime) and close to you.
+ base_url:
+ - https://invidious.io.lol
+ - https://invidious.fdn.fr
+ - https://yt.artemislena.eu
+ - https://invidious.tiekoetter.com
+ - https://invidious.flokinet.to
+ - https://vid.puffyan.us
+ - https://invidious.privacydev.net
+ - https://inv.tux.pizza
+ shortcut: iv
+ timeout: 3.0
+ disabled: true
+
+ - name: jisho
+ engine: jisho
+ shortcut: js
+ timeout: 3.0
+ disabled: true
+
+ - name: kickass
+ engine: kickass
+ base_url:
+ - https://kickasstorrents.to
+ - https://kickasstorrents.cr
+ - https://kickasstorrent.cr
+ - https://kickass.sx
+ - https://kat.am
+ shortcut: kc
+ timeout: 4.0
+ disabled: true
+
+ - name: lemmy communities
+ engine: lemmy
+ lemmy_type: Communities
+ shortcut: leco
+
+ - name: lemmy users
+ engine: lemmy
+ network: lemmy communities
+ lemmy_type: Users
+ shortcut: leus
+
+ - name: lemmy posts
+ engine: lemmy
+ network: lemmy communities
+ lemmy_type: Posts
+ shortcut: lepo
+
+ - name: lemmy comments
+ engine: lemmy
+ network: lemmy communities
+ lemmy_type: Comments
+ shortcut: lecom
+
+ - name: library genesis
+ engine: xpath
+ # search_url: https://libgen.is/search.php?req={query}
+ search_url: https://libgen.rs/search.php?req={query}
+ url_xpath: //a[contains(@href,"book/index.php?md5")]/@href
+ title_xpath: //a[contains(@href,"book/")]/text()[1]
+ content_xpath: //td/a[1][contains(@href,"=author")]/text()
+ categories: files
+ timeout: 7.0
+ disabled: true
+ shortcut: lg
+ about:
+ website: https://libgen.fun/
+ wikidata_id: Q22017206
+ official_api_documentation:
+ use_official_api: false
+ require_api_key: false
+ results: HTML
+
+ - name: z-library
+ engine: zlibrary
+ shortcut: zlib
+ categories: files
+ timeout: 7.0
+ disabled: true
+
+ - name: library of congress
+ engine: loc
+ shortcut: loc
+ categories: images
+
+ - name: libretranslate
+ engine: libretranslate
+ # https://github.com/LibreTranslate/LibreTranslate?tab=readme-ov-file#mirrors
+ base_url:
+ - https://translate.terraprint.co
+ - https://trans.zillyhuhn.com
+ # api_key: abc123
+ shortcut: lt
+ disabled: true
+
+ - name: lingva
+ engine: lingva
+ shortcut: lv
+ # set lingva instance in url, by default it will use the official instance
+ # url: https://lingva.thedaviddelta.com
+
+ - name: lobste.rs
+ engine: xpath
+ search_url: https://lobste.rs/search?q={query}&what=stories&order=relevance
+ results_xpath: //li[contains(@class, "story")]
+ url_xpath: .//a[@class="u-url"]/@href
+ title_xpath: .//a[@class="u-url"]
+ content_xpath: .//a[@class="domain"]
+ categories: it
+ shortcut: lo
+ timeout: 5.0
+ disabled: true
+ about:
+ website: https://lobste.rs/
+ wikidata_id: Q60762874
+ official_api_documentation:
+ use_official_api: false
+ require_api_key: false
+ results: HTML
+
+ - name: mastodon users
+ engine: mastodon
+ mastodon_type: accounts
+ base_url: https://mastodon.social
+ shortcut: mau
+
+ - name: mastodon hashtags
+ engine: mastodon
+ mastodon_type: hashtags
+ base_url: https://mastodon.social
+ shortcut: mah
+
+ # - name: matrixrooms
+ # engine: mrs
+ # # https://docs.searxng.org/dev/engines/online/mrs.html
+ # # base_url: https://mrs-api-host
+ # shortcut: mtrx
+ # disabled: true
+
+ - name: mdn
+ shortcut: mdn
+ engine: json_engine
+ categories: [it]
+ paging: true
+ search_url: https://developer.mozilla.org/api/v1/search?q={query}&page={pageno}
+ results_query: documents
+ url_query: mdn_url
+ url_prefix: https://developer.mozilla.org
+ title_query: title
+ content_query: summary
+ about:
+ website: https://developer.mozilla.org
+ wikidata_id: Q3273508
+ official_api_documentation: null
+ use_official_api: false
+ require_api_key: false
+ results: JSON
+
+ - name: metacpan
+ engine: metacpan
+ shortcut: cpan
+ disabled: true
+ number_of_results: 20
+
+ # - name: meilisearch
+ # engine: meilisearch
+ # shortcut: mes
+ # enable_http: true
+ # base_url: http://localhost:7700
+ # index: my-index
+
+ - name: mixcloud
+ engine: mixcloud
+ shortcut: mc
+
+ # MongoDB engine
+ # Required dependency: pymongo
+ # - name: mymongo
+ # engine: mongodb
+ # shortcut: md
+ # exact_match_only: false
+ # host: '127.0.0.1'
+ # port: 27017
+ # enable_http: true
+ # results_per_page: 20
+ # database: 'business'
+ # collection: 'reviews' # name of the db collection
+ # key: 'name' # key in the collection to search for
+
+ - name: mozhi
+ engine: mozhi
+ base_url:
+ - https://mozhi.aryak.me
+ - https://translate.bus-hit.me
+ - https://nyc1.mz.ggtyler.dev
+ # mozhi_engine: google - see https://mozhi.aryak.me for supported engines
+ timeout: 4.0
+ shortcut: mz
+ disabled: true
+
+ - name: mwmbl
+ engine: mwmbl
+ # api_url: https://api.mwmbl.org
+ shortcut: mwm
+ disabled: true
+
+ - name: npm
+ engine: npm
+ shortcut: npm
+ timeout: 5.0
+ disabled: true
+
+ - name: nyaa
+ engine: nyaa
+ shortcut: nt
+ disabled: true
+
+ - name: mankier
+ engine: json_engine
+ search_url: https://www.mankier.com/api/v2/mans/?q={query}
+ results_query: results
+ url_query: url
+ title_query: name
+ content_query: description
+ categories: it
+ shortcut: man
+ about:
+ website: https://www.mankier.com/
+ official_api_documentation: https://www.mankier.com/api
+ use_official_api: true
+ require_api_key: false
+ results: JSON
+
+ # read https://docs.searxng.org/dev/engines/online/mullvad_leta.html
+ # - name: mullvadleta
+ # engine: mullvad_leta
+ # leta_engine: google # choose one of the following: google, brave
+ # use_cache: true # Only 100 non-cache searches per day, suggested only for private instances
+ # search_url: https://leta.mullvad.net
+ # categories: [general, web]
+ # shortcut: ml
+
+ - name: odysee
+ engine: odysee
+ shortcut: od
+ disabled: true
+
+ - name: openairedatasets
+ engine: json_engine
+ paging: true
+ search_url: https://api.openaire.eu/search/datasets?format=json&page={pageno}&size=10&title={query}
+ results_query: response/results/result
+ url_query: metadata/oaf:entity/oaf:result/children/instance/webresource/url/$
+ title_query: metadata/oaf:entity/oaf:result/title/$
+ content_query: metadata/oaf:entity/oaf:result/description/$
+ content_html_to_text: true
+ categories: "science"
+ shortcut: oad
+ timeout: 5.0
+ about:
+ website: https://www.openaire.eu/
+ wikidata_id: Q25106053
+ official_api_documentation: https://api.openaire.eu/
+ use_official_api: false
+ require_api_key: false
+ results: JSON
+
+ - name: openairepublications
+ engine: json_engine
+ paging: true
+ search_url: https://api.openaire.eu/search/publications?format=json&page={pageno}&size=10&title={query}
+ results_query: response/results/result
+ url_query: metadata/oaf:entity/oaf:result/children/instance/webresource/url/$
+ title_query: metadata/oaf:entity/oaf:result/title/$
+ content_query: metadata/oaf:entity/oaf:result/description/$
+ content_html_to_text: true
+ categories: science
+ shortcut: oap
+ timeout: 5.0
+ about:
+ website: https://www.openaire.eu/
+ wikidata_id: Q25106053
+ official_api_documentation: https://api.openaire.eu/
+ use_official_api: false
+ require_api_key: false
+ results: JSON
+
+ - name: openmeteo
+ engine: open_meteo
+ shortcut: om
+ disabled: true
+
+ # - name: opensemanticsearch
+ # engine: opensemantic
+ # shortcut: oss
+ # base_url: 'http://localhost:8983/solr/opensemanticsearch/'
+
+ - name: openstreetmap
+ engine: openstreetmap
+ shortcut: osm
+
+ - name: openrepos
+ engine: xpath
+ paging: true
+ search_url: https://openrepos.net/search/node/{query}?page={pageno}
+ url_xpath: //li[@class="search-result"]//h3[@class="title"]/a/@href
+ title_xpath: //li[@class="search-result"]//h3[@class="title"]/a
+ content_xpath: //li[@class="search-result"]//div[@class="search-snippet-info"]//p[@class="search-snippet"]
+ categories: files
+ timeout: 4.0
+ disabled: true
+ shortcut: or
+ about:
+ website: https://openrepos.net/
+ wikidata_id:
+ official_api_documentation:
+ use_official_api: false
+ require_api_key: false
+ results: HTML
+
+ - name: packagist
+ engine: json_engine
+ paging: true
+ search_url: https://packagist.org/search.json?q={query}&page={pageno}
+ results_query: results
+ url_query: url
+ title_query: name
+ content_query: description
+ categories: [it, packages]
+ disabled: true
+ timeout: 5.0
+ shortcut: pack
+ about:
+ website: https://packagist.org
+ wikidata_id: Q108311377
+ official_api_documentation: https://packagist.org/apidoc
+ use_official_api: true
+ require_api_key: false
+ results: JSON
+
+ - name: pdbe
+ engine: pdbe
+ shortcut: pdb
+ # Hide obsolete PDB entries. Default is not to hide obsolete structures
+ # hide_obsolete: false
+
+ - name: photon
+ engine: photon
+ shortcut: ph
+
+ - name: pinterest
+ engine: pinterest
+ shortcut: pin
+
+ - name: piped
+ engine: piped
+ shortcut: ppd
+ categories: videos
+ piped_filter: videos
+ timeout: 3.0
+
+ # URL to use as link and for embeds
+ frontend_url: https://srv.piped.video
+ # Instance will be selected randomly, for more see https://piped-instances.kavin.rocks/
+ backend_url:
+ - https://pipedapi.kavin.rocks
+ - https://pipedapi-libre.kavin.rocks
+ - https://pipedapi.adminforge.de
+
+ - name: piped.music
+ engine: piped
+ network: piped
+ shortcut: ppdm
+ categories: music
+ piped_filter: music_songs
+ timeout: 3.0
+
+ - name: piratebay
+ engine: piratebay
+ shortcut: tpb
+ # You may need to change this URL to a proxy if piratebay is blocked in your
+ # country
+ url: https://thepiratebay.org/
+ timeout: 3.0
+
+ - name: pixiv
+ shortcut: pv
+ engine: pixiv
+ disabled: true
+ inactive: true
+ pixiv_image_proxies:
+ - https://pximg.example.org
+ # A proxy is required to load the images. Hosting an image proxy server
+ # for Pixiv:
+ # --> https://pixivfe.pages.dev/hosting-image-proxy-server/
+ # Proxies from public instances. Ask the public instances owners if they
+ # agree to receive traffic from SearXNG!
+ # --> https://codeberg.org/VnPower/PixivFE#instances
+ # --> https://github.com/searxng/searxng/pull/3192#issuecomment-1941095047
+ # image proxy of https://pixiv.cat
+ # - https://i.pixiv.cat
+ # image proxy of https://www.pixiv.pics
+ # - https://pximg.cocomi.eu.org
+ # image proxy of https://pixivfe.exozy.me
+ # - https://pximg.exozy.me
+ # image proxy of https://pixivfe.ducks.party
+ # - https://pixiv.ducks.party
+ # image proxy of https://pixiv.perennialte.ch
+ # - https://pximg.perennialte.ch
+
+ - name: podcastindex
+ engine: podcastindex
+ shortcut: podcast
+
+ # Required dependency: psycopg2
+ # - name: postgresql
+ # engine: postgresql
+ # database: postgres
+ # username: postgres
+ # password: postgres
+ # limit: 10
+ # query_str: 'SELECT * from my_table WHERE my_column = %(query)s'
+ # shortcut : psql
+
+ - name: presearch
+ engine: presearch
+ search_type: search
+ categories: [general, web]
+ shortcut: ps
+ timeout: 4.0
+ disabled: true
+
+ - name: presearch images
+ engine: presearch
+ network: presearch
+ search_type: images
+ categories: [images, web]
+ timeout: 4.0
+ shortcut: psimg
+ disabled: true
+
+ - name: presearch videos
+ engine: presearch
+ network: presearch
+ search_type: videos
+ categories: [general, web]
+ timeout: 4.0
+ shortcut: psvid
+ disabled: true
+
+ - name: presearch news
+ engine: presearch
+ network: presearch
+ search_type: news
+ categories: [news, web]
+ timeout: 4.0
+ shortcut: psnews
+ disabled: true
+
+ - name: pub.dev
+ engine: xpath
+ shortcut: pd
+ search_url: https://pub.dev/packages?q={query}&page={pageno}
+ paging: true
+ results_xpath: //div[contains(@class,"packages-item")]
+ url_xpath: ./div/h3/a/@href
+ title_xpath: ./div/h3/a
+ content_xpath: ./div/div/div[contains(@class,"packages-description")]/span
+ categories: [packages, it]
+ timeout: 3.0
+ disabled: true
+ first_page_num: 1
+ about:
+ website: https://pub.dev/
+ official_api_documentation: https://pub.dev/help/api
+ use_official_api: false
+ require_api_key: false
+ results: HTML
+
+ - name: pubmed
+ engine: pubmed
+ shortcut: pub
+ timeout: 3.0
+
+ - name: pypi
+ shortcut: pypi
+ engine: pypi
+
+ - name: qwant
+ qwant_categ: web
+ engine: qwant
+ disabled: true
+ shortcut: qw
+ categories: [general, web]
+ additional_tests:
+ rosebud: *test_rosebud
+
+ - name: qwant news
+ qwant_categ: news
+ engine: qwant
+ shortcut: qwn
+ categories: news
+ network: qwant
+
+ - name: qwant images
+ qwant_categ: images
+ engine: qwant
+ shortcut: qwi
+ categories: [images, web]
+ network: qwant
+
+ - name: qwant videos
+ qwant_categ: videos
+ engine: qwant
+ shortcut: qwv
+ categories: [videos, web]
+ network: qwant
+
+ # - name: library
+ # engine: recoll
+ # shortcut: lib
+ # base_url: 'https://recoll.example.org/'
+ # search_dir: ''
+ # mount_prefix: /export
+ # dl_prefix: 'https://download.example.org'
+ # timeout: 30.0
+ # categories: files
+ # disabled: true
+
+ # - name: recoll library reference
+ # engine: recoll
+ # base_url: 'https://recoll.example.org/'
+ # search_dir: reference
+ # mount_prefix: /export
+ # dl_prefix: 'https://download.example.org'
+ # shortcut: libr
+ # timeout: 30.0
+ # categories: files
+ # disabled: true
+
+ - name: radio browser
+ engine: radio_browser
+ shortcut: rb
+
+ - name: reddit
+ engine: reddit
+ shortcut: re
+ page_size: 25
+ disabled: true
+
+ - name: rottentomatoes
+ engine: rottentomatoes
+ shortcut: rt
+ disabled: true
+
+ # Required dependency: redis
+ # - name: myredis
+ # shortcut : rds
+ # engine: redis_server
+ # exact_match_only: false
+ # host: '127.0.0.1'
+ # port: 6379
+ # enable_http: true
+ # password: ''
+ # db: 0
+
+ # tmp suspended: bad certificate
+ # - name: scanr structures
+ # shortcut: scs
+ # engine: scanr_structures
+ # disabled: true
+
+ - name: searchmysite
+ engine: xpath
+ shortcut: sms
+ categories: general
+ paging: true
+ search_url: https://searchmysite.net/search/?q={query}&page={pageno}
+ results_xpath: //div[contains(@class,'search-result')]
+ url_xpath: .//a[contains(@class,'result-link')]/@href
+ title_xpath: .//span[contains(@class,'result-title-txt')]/text()
+ content_xpath: ./p[@id='result-hightlight']
+ disabled: true
+ about:
+ website: https://searchmysite.net
+
+ - name: sepiasearch
+ engine: sepiasearch
+ shortcut: sep
+
+ - name: soundcloud
+ engine: soundcloud
+ shortcut: sc
+
+ - name: stackoverflow
+ engine: stackexchange
+ shortcut: st
+ api_site: 'stackoverflow'
+ categories: [it, q&a]
+
+ - name: askubuntu
+ engine: stackexchange
+ shortcut: ubuntu
+ api_site: 'askubuntu'
+ categories: [it, q&a]
+
+ - name: internetarchivescholar
+ engine: internet_archive_scholar
+ shortcut: ias
+ timeout: 15.0
+
+ - name: superuser
+ engine: stackexchange
+ shortcut: su
+ api_site: 'superuser'
+ categories: [it, q&a]
+
+ - name: discuss.python
+ engine: discourse
+ shortcut: dpy
+ base_url: 'https://discuss.python.org'
+ categories: [it, q&a]
+ disabled: true
+
+ - name: caddy.community
+ engine: discourse
+ shortcut: caddy
+ base_url: 'https://caddy.community'
+ categories: [it, q&a]
+ disabled: true
+
+ - name: pi-hole.community
+ engine: discourse
+ shortcut: pi
+ categories: [it, q&a]
+ base_url: 'https://discourse.pi-hole.net'
+ disabled: true
+
+ - name: searchcode code
+ engine: searchcode_code
+ shortcut: scc
+ disabled: true
+
+ # - name: searx
+ # engine: searx_engine
+ # shortcut: se
+ # instance_urls :
+ # - http://127.0.0.1:8888/
+ # - ...
+ # disabled: true
+
+ - name: semantic scholar
+ engine: semantic_scholar
+ disabled: true
+ shortcut: se
+
+ # Spotify needs API credentials
+ # - name: spotify
+ # engine: spotify
+ # shortcut: stf
+ # api_client_id: *******
+ # api_client_secret: *******
+
+ # - name: solr
+ # engine: solr
+ # shortcut: slr
+ # base_url: http://localhost:8983
+ # collection: collection_name
+ # sort: '' # sorting: asc or desc
+ # field_list: '' # comma separated list of field names to display on the UI
+ # default_fields: '' # default field to query
+ # query_fields: '' # query fields
+ # enable_http: true
+
+ # - name: springer nature
+ # engine: springer
+ # # get your API key from: https://dev.springernature.com/signup
+ # # working API key, for test & debug: "a69685087d07eca9f13db62f65b8f601"
+ # api_key: 'unset'
+ # shortcut: springer
+ # timeout: 15.0
+
+ - name: startpage
+ engine: startpage
+ shortcut: sp
+ timeout: 6.0
+ disabled: true
+ additional_tests:
+ rosebud: *test_rosebud
+
+ - name: tokyotoshokan
+ engine: tokyotoshokan
+ shortcut: tt
+ timeout: 6.0
+ disabled: true
+
+ - name: solidtorrents
+ engine: solidtorrents
+ shortcut: solid
+ timeout: 4.0
+ base_url:
+ - https://solidtorrents.to
+ - https://bitsearch.to
+
+ # For this demo of the sqlite engine download:
+ # https://liste.mediathekview.de/filmliste-v2.db.bz2
+ # and unpack into searx/data/filmliste-v2.db
+ # Query to test: "!demo concert"
+ #
+ # - name: demo
+ # engine: sqlite
+ # shortcut: demo
+ # categories: general
+ # result_template: default.html
+ # database: searx/data/filmliste-v2.db
+ # query_str: >-
+ # SELECT title || ' (' || time(duration, 'unixepoch') || ')' AS title,
+ # COALESCE( NULLIF(url_video_hd,''), NULLIF(url_video_sd,''), url_video) AS url,
+ # description AS content
+ # FROM film
+ # WHERE title LIKE :wildcard OR description LIKE :wildcard
+ # ORDER BY duration DESC
+
+ - name: tagesschau
+ engine: tagesschau
+ # when set to false, display URLs from Tagesschau, and not the actual source
+ # (e.g. NDR, WDR, SWR, HR, ...)
+ use_source_url: true
+ shortcut: ts
+ disabled: true
+
+ - name: tmdb
+ engine: xpath
+ paging: true
+ categories: movies
+ search_url: https://www.themoviedb.org/search?page={pageno}&query={query}
+ results_xpath: //div[contains(@class,"movie") or contains(@class,"tv")]//div[contains(@class,"card")]
+ url_xpath: .//div[contains(@class,"poster")]/a/@href
+ thumbnail_xpath: .//img/@src
+ title_xpath: .//div[contains(@class,"title")]//h2
+ content_xpath: .//div[contains(@class,"overview")]
+ shortcut: tm
+ disabled: true
+
+ # Requires Tor
+ - name: torch
+ engine: xpath
+ paging: true
+ search_url:
+ http://xmh57jrknzkhv6y3ls3ubitzfqnkrwxhopf5aygthi7d6rplyvk3noyd.onion/cgi-bin/omega/omega?P={query}&DEFAULTOP=and
+ results_xpath: //table//tr
+ url_xpath: ./td[2]/a
+ title_xpath: ./td[2]/b
+ content_xpath: ./td[2]/small
+ categories: onions
+ enable_http: true
+ shortcut: tch
+
+ # torznab engine lets you query any torznab compatible indexer. Using this
+ # engine in combination with Jackett opens the possibility to query a lot of
+ # public and private indexers directly from SearXNG. More details at:
+ # https://docs.searxng.org/dev/engines/online/torznab.html
+ #
+ # - name: Torznab EZTV
+ # engine: torznab
+ # shortcut: eztv
+ # base_url: http://localhost:9117/api/v2.0/indexers/eztv/results/torznab
+ # enable_http: true # if using localhost
+ # api_key: xxxxxxxxxxxxxxx
+ # show_magnet_links: true
+ # show_torrent_files: false
+ # # https://github.com/Jackett/Jackett/wiki/Jackett-Categories
+ # torznab_categories: # optional
+ # - 2000
+ # - 5000
+
+ # tmp suspended - too slow, too many errors
+ # - name: urbandictionary
+ # engine : xpath
+ # search_url : https://www.urbandictionary.com/define.php?term={query}
+ # url_xpath : //*[@class="word"]/@href
+ # title_xpath : //*[@class="def-header"]
+ # content_xpath: //*[@class="meaning"]
+ # shortcut: ud
+
+ - name: unsplash
+ engine: unsplash
+ shortcut: us
+
+ - name: yandex music
+ engine: yandex_music
+ shortcut: ydm
+ disabled: true
+ # https://yandex.com/support/music/access.html
+ inactive: true
+
+ - name: yahoo
+ engine: yahoo
+ shortcut: yh
+ disabled: true
+
+ - name: yahoo news
+ engine: yahoo_news
+ shortcut: yhn
+
+ - name: youtube
+ shortcut: yt
+ # You can use the engine using the official stable API, but you need an API
+ # key See: https://console.developers.google.com/project
+ #
+ # engine: youtube_api
+ # api_key: 'apikey' # required!
+ #
+ # Or you can use the html non-stable engine, activated by default
+ engine: youtube_noapi
+
+ - name: dailymotion
+ engine: dailymotion
+ shortcut: dm
+
+ - name: vimeo
+ engine: vimeo
+ shortcut: vm
+ disabled: true
+
+ - name: wiby
+ engine: json_engine
+ paging: true
+ search_url: https://wiby.me/json/?q={query}&p={pageno}
+ url_query: URL
+ title_query: Title
+ content_query: Snippet
+ categories: [general, web]
+ shortcut: wib
+ disabled: true
+ about:
+ website: https://wiby.me/
+
+ - name: alexandria
+ engine: json_engine
+ shortcut: alx
+ categories: general
+ paging: true
+ search_url: https://api.alexandria.org/?a=1&q={query}&p={pageno}
+ results_query: results
+ title_query: title
+ url_query: url
+ content_query: snippet
+ timeout: 1.5
+ disabled: true
+ about:
+ website: https://alexandria.org/
+ official_api_documentation: https://github.com/alexandria-org/alexandria-api/raw/master/README.md
+ use_official_api: true
+ require_api_key: false
+ results: JSON
+
+ - name: wikibooks
+ engine: mediawiki
+ weight: 0.5
+ shortcut: wb
+ categories: [general, wikimedia]
+ base_url: "https://{language}.wikibooks.org/"
+ search_type: text
+ disabled: true
+ about:
+ website: https://www.wikibooks.org/
+ wikidata_id: Q367
+
+ - name: wikinews
+ engine: mediawiki
+ shortcut: wn
+ categories: [news, wikimedia]
+ base_url: "https://{language}.wikinews.org/"
+ search_type: text
+ srsort: create_timestamp_desc
+ about:
+ website: https://www.wikinews.org/
+ wikidata_id: Q964
+
+ - name: wikiquote
+ engine: mediawiki
+ weight: 0.5
+ shortcut: wq
+ categories: [general, wikimedia]
+ base_url: "https://{language}.wikiquote.org/"
+ search_type: text
+ disabled: true
+ additional_tests:
+ rosebud: *test_rosebud
+ about:
+ website: https://www.wikiquote.org/
+ wikidata_id: Q369
+
+ - name: wikisource
+ engine: mediawiki
+ weight: 0.5
+ shortcut: ws
+ categories: [general, wikimedia]
+ base_url: "https://{language}.wikisource.org/"
+ search_type: text
+ disabled: true
+ about:
+ website: https://www.wikisource.org/
+ wikidata_id: Q263
+
+ - name: wikispecies
+ engine: mediawiki
+ shortcut: wsp
+ categories: [general, science, wikimedia]
+ base_url: "https://species.wikimedia.org/"
+ search_type: text
+ disabled: true
+ about:
+ website: https://species.wikimedia.org/
+ wikidata_id: Q13679
+ tests:
+ wikispecies:
+ matrix:
+ query: "Campbell, L.I. et al. 2011: MicroRNAs"
+ lang: en
+ result_container:
+ - not_empty
+ - ['one_title_contains', 'Tardigrada']
+ test:
+ - unique_results
+
+ - name: wiktionary
+ engine: mediawiki
+ shortcut: wt
+ categories: [dictionaries, wikimedia]
+ base_url: "https://{language}.wiktionary.org/"
+ search_type: text
+ about:
+ website: https://www.wiktionary.org/
+ wikidata_id: Q151
+
+ - name: wikiversity
+ engine: mediawiki
+ weight: 0.5
+ shortcut: wv
+ categories: [general, wikimedia]
+ base_url: "https://{language}.wikiversity.org/"
+ search_type: text
+ disabled: true
+ about:
+ website: https://www.wikiversity.org/
+ wikidata_id: Q370
+
+ - name: wikivoyage
+ engine: mediawiki
+ weight: 0.5
+ shortcut: wy
+ categories: [general, wikimedia]
+ base_url: "https://{language}.wikivoyage.org/"
+ search_type: text
+ disabled: true
+ about:
+ website: https://www.wikivoyage.org/
+ wikidata_id: Q373
+
+ - name: wikicommons.images
+ engine: wikicommons
+ shortcut: wc
+ categories: images
+ search_type: images
+ number_of_results: 10
+
+ - name: wikicommons.videos
+ engine: wikicommons
+ shortcut: wcv
+ categories: videos
+ search_type: videos
+ number_of_results: 10
+
+ - name: wikicommons.audio
+ engine: wikicommons
+ shortcut: wca
+ categories: music
+ search_type: audio
+ number_of_results: 10
+
+ - name: wikicommons.files
+ engine: wikicommons
+ shortcut: wcf
+ categories: files
+ search_type: files
+ number_of_results: 10
+
+ - name: wolframalpha
+ shortcut: wa
+ # You can use the engine using the official stable API, but you need an API
+ # key. See: https://products.wolframalpha.com/api/
+ #
+ # engine: wolframalpha_api
+ # api_key: ''
+ #
+ # Or you can use the html non-stable engine, activated by default
+ engine: wolframalpha_noapi
+ timeout: 6.0
+ categories: general
+ disabled: true
+
+ - name: dictzone
+ engine: dictzone
+ shortcut: dc
+
+ - name: mymemory translated
+ engine: translated
+ shortcut: tl
+ timeout: 5.0
+ # You can use without an API key, but you are limited to 1000 words/day
+ # See: https://mymemory.translated.net/doc/usagelimits.php
+ # api_key: ''
+
+ # Required dependency: mysql-connector-python
+ # - name: mysql
+ # engine: mysql_server
+ # database: mydatabase
+ # username: user
+ # password: pass
+ # limit: 10
+ # query_str: 'SELECT * from mytable WHERE fieldname=%(query)s'
+ # shortcut: mysql
+
+ - name: 1337x
+ engine: 1337x
+ shortcut: 1337x
+ disabled: true
+
+ - name: duden
+ engine: duden
+ shortcut: du
+ disabled: true
+
+ - name: seznam
+ shortcut: szn
+ engine: seznam
+ disabled: true
+
+ # - name: deepl
+ # engine: deepl
+ # shortcut: dpl
+ # # You can use the engine using the official stable API, but you need an API key
+ # # See: https://www.deepl.com/pro-api?cta=header-pro-api
+ # api_key: '' # required!
+ # timeout: 5.0
+ # disabled: true
+
+ - name: mojeek
+ shortcut: mjk
+ engine: mojeek
+ categories: [general, web]
+ disabled: true
+
+ - name: mojeek images
+ shortcut: mjkimg
+ engine: mojeek
+ categories: [images, web]
+ search_type: images
+ paging: false
+ disabled: true
+
+ - name: mojeek news
+ shortcut: mjknews
+ engine: mojeek
+ categories: [news, web]
+ search_type: news
+ paging: false
+ disabled: true
+
+ - name: moviepilot
+ engine: moviepilot
+ shortcut: mp
+ disabled: true
+
+ - name: naver
+ shortcut: nvr
+ categories: [general, web]
+ engine: xpath
+ paging: true
+ search_url: https://search.naver.com/search.naver?where=webkr&sm=osp_hty&ie=UTF-8&query={query}&start={pageno}
+ url_xpath: //a[@class="link_tit"]/@href
+ title_xpath: //a[@class="link_tit"]
+ content_xpath: //div[@class="total_dsc_wrap"]/a
+ first_page_num: 1
+ page_size: 10
+ disabled: true
+ about:
+ website: https://www.naver.com/
+ wikidata_id: Q485639
+ official_api_documentation: https://developers.naver.com/docs/nmt/examples/
+ use_official_api: false
+ require_api_key: false
+ results: HTML
+ language: ko
+
+ - name: rubygems
+ shortcut: rbg
+ engine: xpath
+ paging: true
+ search_url: https://rubygems.org/search?page={pageno}&query={query}
+ results_xpath: /html/body/main/div/a[@class="gems__gem"]
+ url_xpath: ./@href
+ title_xpath: ./span/h2
+ content_xpath: ./span/p
+ suggestion_xpath: /html/body/main/div/div[@class="search__suggestions"]/p/a
+ first_page_num: 1
+ categories: [it, packages]
+ disabled: true
+ about:
+ website: https://rubygems.org/
+ wikidata_id: Q1853420
+ official_api_documentation: https://guides.rubygems.org/rubygems-org-api/
+ use_official_api: false
+ require_api_key: false
+ results: HTML
+
+ - name: peertube
+ engine: peertube
+ shortcut: ptb
+ paging: true
+ # alternatives see: https://instances.joinpeertube.org/instances
+ # base_url: https://tube.4aem.com
+ categories: videos
+ disabled: true
+ timeout: 6.0
+
+ - name: mediathekviewweb
+ engine: mediathekviewweb
+ shortcut: mvw
+ disabled: true
+
+ - name: yacy
+ # https://docs.searxng.org/dev/engines/online/yacy.html
+ engine: yacy
+ categories: general
+ search_type: text
+ base_url:
+ - https://yacy.searchlab.eu
+ # see https://github.com/searxng/searxng/pull/3631#issuecomment-2240903027
+ # - https://search.kyun.li
+ # - https://yacy.securecomcorp.eu
+ # - https://yacy.myserv.ca
+ # - https://yacy.nsupdate.info
+ # - https://yacy.electroncash.de
+ shortcut: ya
+ disabled: true
+ # if you aren't using HTTPS for your local yacy instance disable https
+ # enable_http: false
+ search_mode: 'global'
+ # timeout can be reduced in 'local' search mode
+ timeout: 5.0
+
+ - name: yacy images
+ engine: yacy
+ network: yacy
+ categories: images
+ search_type: image
+ shortcut: yai
+ disabled: true
+ # timeout can be reduced in 'local' search mode
+ timeout: 5.0
+
+ - name: rumble
+ engine: rumble
+ shortcut: ru
+ base_url: https://rumble.com/
+ paging: true
+ categories: videos
+ disabled: true
+
+ - name: livespace
+ engine: livespace
+ shortcut: ls
+ categories: videos
+ disabled: true
+ timeout: 5.0
+
+ - name: wordnik
+ engine: wordnik
+ shortcut: def
+ base_url: https://www.wordnik.com/
+ categories: [dictionaries]
+ timeout: 5.0
+
+ - name: woxikon.de synonyme
+ engine: xpath
+ shortcut: woxi
+ categories: [dictionaries]
+ timeout: 5.0
+ disabled: true
+ search_url: https://synonyme.woxikon.de/synonyme/{query}.php
+ url_xpath: //div[@class="upper-synonyms"]/a/@href
+ content_xpath: //div[@class="synonyms-list-group"]
+ title_xpath: //div[@class="upper-synonyms"]/a
+ no_result_for_http_status: [404]
+ about:
+ website: https://www.woxikon.de/
+ wikidata_id: # No Wikidata ID
+ use_official_api: false
+ require_api_key: false
+ results: HTML
+ language: de
+
+ - name: seekr news
+ engine: seekr
+ shortcut: senews
+ categories: news
+ seekr_category: news
+ disabled: true
+
+ - name: seekr images
+ engine: seekr
+ network: seekr news
+ shortcut: seimg
+ categories: images
+ seekr_category: images
+ disabled: true
+
+ - name: seekr videos
+ engine: seekr
+ network: seekr news
+ shortcut: sevid
+ categories: videos
+ seekr_category: videos
+ disabled: true
+
+ - name: sjp.pwn
+ engine: sjp
+ shortcut: sjp
+ base_url: https://sjp.pwn.pl/
+ timeout: 5.0
+ disabled: true
+
+ - name: stract
+ engine: stract
+ shortcut: str
+ disabled: true
+
+ - name: svgrepo
+ engine: svgrepo
+ shortcut: svg
+ timeout: 10.0
+ disabled: true
+
+ - name: tootfinder
+ engine: tootfinder
+ shortcut: toot
+
+ - name: voidlinux
+ engine: voidlinux
+ shortcut: void
+ disabled: true
+
+ - name: wallhaven
+ engine: wallhaven
+ # api_key: abcdefghijklmnopqrstuvwxyz
+ shortcut: wh
+
+ # wikimini: online encyclopedia for children
+ # The fulltext and title parameters are necessary for Wikimini because
+ # sometimes it will not show the results and redirect instead
+ - name: wikimini
+ engine: xpath
+ shortcut: wkmn
+ search_url: https://fr.wikimini.org/w/index.php?search={query}&title=Sp%C3%A9cial%3ASearch&fulltext=Search
+ url_xpath: //li/div[@class="mw-search-result-heading"]/a/@href
+ title_xpath: //li//div[@class="mw-search-result-heading"]/a
+ content_xpath: //li/div[@class="searchresult"]
+ categories: general
+ disabled: true
+ about:
+ website: https://wikimini.org/
+ wikidata_id: Q3568032
+ use_official_api: false
+ require_api_key: false
+ results: HTML
+ language: fr
+
+ - name: wttr.in
+ engine: wttr
+ shortcut: wttr
+ timeout: 9.0
+
+ - name: yummly
+ engine: yummly
+ shortcut: yum
+ disabled: true
+
+ - name: brave
+ engine: brave
+ shortcut: br
+ time_range_support: true
+ paging: true
+ categories: [general, web]
+ brave_category: search
+ # brave_spellcheck: true
+
+ - name: brave.images
+ engine: brave
+ network: brave
+ shortcut: brimg
+ categories: [images, web]
+ brave_category: images
+
+ - name: brave.videos
+ engine: brave
+ network: brave
+ shortcut: brvid
+ categories: [videos, web]
+ brave_category: videos
+
+ - name: brave.news
+ engine: brave
+ network: brave
+ shortcut: brnews
+ categories: news
+ brave_category: news
+
+ # - name: brave.goggles
+ # engine: brave
+ # network: brave
+ # shortcut: brgog
+ # time_range_support: true
+ # paging: true
+ # categories: [general, web]
+ # brave_category: goggles
+ # Goggles: # required! This should be a URL ending in .goggle
+
+ - name: lib.rs
+ shortcut: lrs
+ engine: lib_rs
+ disabled: true
+
+ - name: sourcehut
+ shortcut: srht
+ engine: xpath
+ paging: true
+ search_url: https://sr.ht/projects?page={pageno}&search={query}
+ results_xpath: (//div[@class="event-list"])[1]/div[@class="event"]
+ url_xpath: ./h4/a[2]/@href
+ title_xpath: ./h4/a[2]
+ content_xpath: ./p
+ first_page_num: 1
+ categories: [it, repos]
+ disabled: true
+ about:
+ website: https://sr.ht
+ wikidata_id: Q78514485
+ official_api_documentation: https://man.sr.ht/
+ use_official_api: false
+ require_api_key: false
+ results: HTML
+
+ - name: goo
+ shortcut: goo
+ engine: xpath
+ paging: true
+ search_url: https://search.goo.ne.jp/web.jsp?MT={query}&FR={pageno}0
+ url_xpath: //div[@class="result"]/p[@class='title fsL1']/a/@href
+ title_xpath: //div[@class="result"]/p[@class='title fsL1']/a
+ content_xpath: //p[contains(@class,'url fsM')]/following-sibling::p
+ first_page_num: 0
+ categories: [general, web]
+ disabled: true
+ timeout: 4.0
+ about:
+ website: https://search.goo.ne.jp
+ wikidata_id: Q249044
+ use_official_api: false
+ require_api_key: false
+ results: HTML
+ language: ja
+
+ - name: bt4g
+ engine: bt4g
+ shortcut: bt4g
+
+ - name: pkg.go.dev
+ engine: pkg_go_dev
+ shortcut: pgo
+ disabled: true
+
+# Doku engine lets you access any Doku wiki instance:
+# A public one or a private/corporate one.
+# - name: ubuntuwiki
+# engine: doku
+# shortcut: uw
+# base_url: 'https://doc.ubuntu-fr.org'
+
+# Be careful when enabling this engine if you are
+# running a public instance. Do not expose any sensitive
+# information. You can restrict access by configuring a list
+# of access tokens under tokens.
+# - name: git grep
+# engine: command
+# command: ['git', 'grep', '{{QUERY}}']
+# shortcut: gg
+# tokens: []
+# disabled: true
+# delimiter:
+# chars: ':'
+# keys: ['filepath', 'code']
+
+# Be careful when enabling this engine if you are
+# running a public instance. Do not expose any sensitive
+# information. You can restrict access by configuring a list
+# of access tokens under tokens.
+# - name: locate
+# engine: command
+# command: ['locate', '{{QUERY}}']
+# shortcut: loc
+# tokens: []
+# disabled: true
+# delimiter:
+# chars: ' '
+# keys: ['line']
+
+# Be careful when enabling this engine if you are
+# running a public instance. Do not expose any sensitive
+# information. You can restrict access by configuring a list
+# of access tokens under tokens.
+# - name: find
+# engine: command
+# command: ['find', '.', '-name', '{{QUERY}}']
+# query_type: path
+# shortcut: fnd
+# tokens: []
+# disabled: true
+# delimiter:
+# chars: ' '
+# keys: ['line']
+
+# Be careful when enabling this engine if you are
+# running a public instance. Do not expose any sensitive
+# information. You can restrict access by configuring a list
+# of access tokens under tokens.
+# - name: pattern search in files
+# engine: command
+# command: ['fgrep', '{{QUERY}}']
+# shortcut: fgr
+# tokens: []
+# disabled: true
+# delimiter:
+# chars: ' '
+# keys: ['line']
+
+# Be careful when enabling this engine if you are
+# running a public instance. Do not expose any sensitive
+# information. You can restrict access by configuring a list
+# of access tokens under tokens.
+# - name: regex search in files
+# engine: command
+# command: ['grep', '{{QUERY}}']
+# shortcut: gr
+# tokens: []
+# disabled: true
+# delimiter:
+# chars: ' '
+# keys: ['line']
+
+doi_resolvers:
+ oadoi.org: 'https://oadoi.org/'
+ doi.org: 'https://doi.org/'
+ doai.io: 'https://dissem.in/'
+ sci-hub.se: 'https://sci-hub.se/'
+ sci-hub.st: 'https://sci-hub.st/'
+ sci-hub.ru: 'https://sci-hub.ru/'
+
+default_doi_resolver: 'oadoi.org'
diff --git a/api/core/tools/provider/builtin/searxng/docker/uwsgi.ini b/api/core/tools/provider/builtin/searxng/docker/uwsgi.ini
new file mode 100644
index 00000000000000..9db3d762649fc5
--- /dev/null
+++ b/api/core/tools/provider/builtin/searxng/docker/uwsgi.ini
@@ -0,0 +1,54 @@
+[uwsgi]
+# Who will run the code
+uid = searxng
+gid = searxng
+
+# Number of workers (usually CPU count)
+# default value: %k (= number of CPU core, see Dockerfile)
+workers = %k
+
+# Number of threads per worker
+# default value: 4 (see Dockerfile)
+threads = 4
+
+# The right granted on the created socket
+chmod-socket = 666
+
+# Plugin to use and interpreter config
+single-interpreter = true
+master = true
+plugin = python3
+lazy-apps = true
+enable-threads = 4
+
+# Module to import
+module = searx.webapp
+
+# Virtualenv and python path
+pythonpath = /usr/local/searxng/
+chdir = /usr/local/searxng/searx/
+
+# automatically set processes name to something meaningful
+auto-procname = true
+
+# Disable request logging for privacy
+disable-logging = true
+log-5xx = true
+
+# Set the max size of a request (request-body excluded)
+buffer-size = 8192
+
+# No keep alive
+# See https://github.com/searx/searx-docker/issues/24
+add-header = Connection: close
+
+# Follow SIGTERM convention
+# See https://github.com/searxng/searxng/issues/3427
+die-on-term
+
+# uwsgi serves the static files
+static-map = /static=/usr/local/searxng/searx/static
+# expires set to one day
+static-expires = /* 86400
+static-gzip-all = True
+offload-threads = 4
diff --git a/api/core/tools/provider/builtin/searxng/searxng.py b/api/core/tools/provider/builtin/searxng/searxng.py
index 24b94b5ca4a391..ab354003e6f567 100644
--- a/api/core/tools/provider/builtin/searxng/searxng.py
+++ b/api/core/tools/provider/builtin/searxng/searxng.py
@@ -17,8 +17,7 @@ def _validate_credentials(self, credentials: dict[str, Any]) -> None:
tool_parameters={
"query": "SearXNG",
"limit": 1,
- "search_type": "page",
- "result_type": "link"
+ "search_type": "general"
},
)
except Exception as e:
diff --git a/api/core/tools/provider/builtin/searxng/searxng.yaml b/api/core/tools/provider/builtin/searxng/searxng.yaml
index 64bd428280d835..9554c93d5a0c53 100644
--- a/api/core/tools/provider/builtin/searxng/searxng.yaml
+++ b/api/core/tools/provider/builtin/searxng/searxng.yaml
@@ -6,21 +6,18 @@ identity:
zh_Hans: SearXNG
description:
en_US: A free internet metasearch engine.
- zh_Hans: 开源互联网元搜索引擎
+ zh_Hans: 开源免费的互联网元搜索引擎
icon: icon.svg
tags:
- search
- productivity
credentials_for_provider:
searxng_base_url:
- type: secret-input
+ type: text-input
required: true
label:
en_US: SearXNG base URL
zh_Hans: SearXNG base URL
- help:
- en_US: Please input your SearXNG base URL
- zh_Hans: 请输入您的 SearXNG base URL
placeholder:
en_US: Please input your SearXNG base URL
zh_Hans: 请输入您的 SearXNG base URL
diff --git a/api/core/tools/provider/builtin/searxng/tools/searxng_search.py b/api/core/tools/provider/builtin/searxng/tools/searxng_search.py
index 5d12553629abfe..dc835a8e8cbd5b 100644
--- a/api/core/tools/provider/builtin/searxng/tools/searxng_search.py
+++ b/api/core/tools/provider/builtin/searxng/tools/searxng_search.py
@@ -1,4 +1,3 @@
-import json
from typing import Any
import requests
@@ -7,90 +6,11 @@
from core.tools.tool.builtin_tool import BuiltinTool
-class SearXNGSearchResults(dict):
- """Wrapper for search results."""
-
- def __init__(self, data: str):
- super().__init__(json.loads(data))
- self.__dict__ = self
-
- @property
- def results(self) -> Any:
- return self.get("results", [])
-
-
class SearXNGSearchTool(BuiltinTool):
"""
Tool for performing a search using SearXNG engine.
"""
- SEARCH_TYPE: dict[str, str] = {
- "page": "general",
- "news": "news",
- "image": "images",
- # "video": "videos",
- # "file": "files"
- }
- LINK_FILED: dict[str, str] = {
- "page": "url",
- "news": "url",
- "image": "img_src",
- # "video": "iframe_src",
- # "file": "magnetlink"
- }
- TEXT_FILED: dict[str, str] = {
- "page": "content",
- "news": "content",
- "image": "img_src",
- # "video": "iframe_src",
- # "file": "magnetlink"
- }
-
- def _invoke_query(self, user_id: str, host: str, query: str, search_type: str, result_type: str, topK: int = 5) -> list[dict]:
- """Run query and return the results."""
-
- search_type = search_type.lower()
- if search_type not in self.SEARCH_TYPE.keys():
- search_type= "page"
-
- response = requests.get(host, params={
- "q": query,
- "format": "json",
- "categories": self.SEARCH_TYPE[search_type]
- })
-
- if response.status_code != 200:
- raise Exception(f'Error {response.status_code}: {response.text}')
-
- search_results = SearXNGSearchResults(response.text).results[:topK]
-
- if result_type == 'link':
- results = []
- if search_type == "page" or search_type == "news":
- for r in search_results:
- results.append(self.create_text_message(
- text=f'{r["title"]}: {r.get(self.LINK_FILED[search_type], "")}'
- ))
- elif search_type == "image":
- for r in search_results:
- results.append(self.create_image_message(
- image=r.get(self.LINK_FILED[search_type], "")
- ))
- else:
- for r in search_results:
- results.append(self.create_link_message(
- link=r.get(self.LINK_FILED[search_type], "")
- ))
-
- return results
- else:
- text = ''
- for i, r in enumerate(search_results):
- text += f'{i+1}: {r["title"]} - {r.get(self.TEXT_FILED[search_type], "")}\n'
-
- return self.create_text_message(text=self.summary(user_id=user_id, content=text))
-
-
def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage | list[ToolInvokeMessage]:
"""
Invoke the SearXNG search tool.
@@ -103,23 +23,21 @@ def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMe
ToolInvokeMessage | list[ToolInvokeMessage]: The result of the tool invocation.
"""
- host = self.runtime.credentials.get('searxng_base_url', None)
+ host = self.runtime.credentials.get('searxng_base_url')
if not host:
raise Exception('SearXNG api is required')
-
- query = tool_parameters.get('query')
- if not query:
- return self.create_text_message('Please input query')
-
- num_results = min(tool_parameters.get('num_results', 5), 20)
- search_type = tool_parameters.get('search_type', 'page') or 'page'
- result_type = tool_parameters.get('result_type', 'text') or 'text'
- return self._invoke_query(
- user_id=user_id,
- host=host,
- query=query,
- search_type=search_type,
- result_type=result_type,
- topK=num_results
- )
+ response = requests.get(host, params={
+ "q": tool_parameters.get('query'),
+ "format": "json",
+ "categories": tool_parameters.get('search_type', 'general')
+ })
+
+ if response.status_code != 200:
+ raise Exception(f'Error {response.status_code}: {response.text}')
+
+ res = response.json().get("results", [])
+ if not res:
+ return self.create_text_message(f"No results found, get response: {response.content}")
+
+ return [self.create_json_message(item) for item in res]
diff --git a/api/core/tools/provider/builtin/searxng/tools/searxng_search.yaml b/api/core/tools/provider/builtin/searxng/tools/searxng_search.yaml
index 0edf1744f4b2f4..a5e448a30375b4 100644
--- a/api/core/tools/provider/builtin/searxng/tools/searxng_search.yaml
+++ b/api/core/tools/provider/builtin/searxng/tools/searxng_search.yaml
@@ -1,13 +1,13 @@
identity:
name: searxng_search
- author: Tice
+ author: Junytang
label:
en_US: SearXNG Search
zh_Hans: SearXNG 搜索
description:
human:
- en_US: Perform searches on SearXNG and get results.
- zh_Hans: 在 SearXNG 上进行搜索并获取结果。
+ en_US: SearXNG is a free internet metasearch engine which aggregates results from more than 70 search services.
+ zh_Hans: SearXNG 是一个免费的互联网元搜索引擎,它从70多个不同的搜索服务中聚合搜索结果。
llm: Perform searches on SearXNG and get results.
parameters:
- name: query
@@ -16,9 +16,6 @@ parameters:
label:
en_US: Query string
zh_Hans: 查询语句
- human_description:
- en_US: The search query.
- zh_Hans: 搜索查询语句。
llm_description: Key words for searching
form: llm
- name: search_type
@@ -27,63 +24,46 @@ parameters:
label:
en_US: search type
zh_Hans: 搜索类型
- pt_BR: search type
- human_description:
- en_US: search type for page, news or image.
- zh_Hans: 选择搜索的类型:网页,新闻,图片。
- pt_BR: search type for page, news or image.
- default: Page
+ default: general
options:
- - value: Page
+ - value: general
label:
- en_US: Page
- zh_Hans: 网页
- pt_BR: Page
- - value: News
+ en_US: General
+ zh_Hans: 综合
+ - value: images
+ label:
+ en_US: Images
+ zh_Hans: 图片
+ - value: videos
+ label:
+ en_US: Videos
+ zh_Hans: 视频
+ - value: news
label:
en_US: News
zh_Hans: 新闻
- pt_BR: News
- - value: Image
+ - value: map
label:
- en_US: Image
- zh_Hans: 图片
- pt_BR: Image
- form: form
- - name: num_results
- type: number
- required: true
- label:
- en_US: Number of query results
- zh_Hans: 返回查询数量
- human_description:
- en_US: The number of query results.
- zh_Hans: 返回查询结果的数量。
- form: form
- default: 5
- min: 1
- max: 20
- - name: result_type
- type: select
- required: true
- label:
- en_US: result type
- zh_Hans: 结果类型
- pt_BR: result type
- human_description:
- en_US: return a list of links or texts.
- zh_Hans: 返回一个连接列表还是纯文本内容。
- pt_BR: return a list of links or texts.
- default: text
- options:
- - value: link
+ en_US: Map
+ zh_Hans: 地图
+ - value: music
+ label:
+ en_US: Music
+ zh_Hans: 音乐
+ - value: it
+ label:
+          en_US: IT
+ zh_Hans: 信息技术
+ - value: science
+ label:
+ en_US: Science
+ zh_Hans: 科学
+ - value: files
label:
- en_US: Link
- zh_Hans: 链接
- pt_BR: Link
- - value: text
+ en_US: Files
+ zh_Hans: 文件
+ - value: social_media
label:
- en_US: Text
- zh_Hans: 文本
- pt_BR: Text
+ en_US: Social Media
+ zh_Hans: 社交媒体
form: form
diff --git a/api/core/tools/provider/builtin/stepfun/__init__.py b/api/core/tools/provider/builtin/stepfun/__init__.py
new file mode 100644
index 00000000000000..e69de29bb2d1d6
diff --git a/api/core/tools/provider/builtin/stepfun/_assets/icon.png b/api/core/tools/provider/builtin/stepfun/_assets/icon.png
new file mode 100644
index 00000000000000..85b96d0c74c24c
Binary files /dev/null and b/api/core/tools/provider/builtin/stepfun/_assets/icon.png differ
diff --git a/api/core/tools/provider/builtin/stepfun/stepfun.py b/api/core/tools/provider/builtin/stepfun/stepfun.py
new file mode 100644
index 00000000000000..e809b04546aef5
--- /dev/null
+++ b/api/core/tools/provider/builtin/stepfun/stepfun.py
@@ -0,0 +1,25 @@
+from typing import Any
+
+from core.tools.errors import ToolProviderCredentialValidationError
+from core.tools.provider.builtin.stepfun.tools.image import StepfunTool
+from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController
+
+
+class StepfunProvider(BuiltinToolProviderController):
+ def _validate_credentials(self, credentials: dict[str, Any]) -> None:
+ try:
+ StepfunTool().fork_tool_runtime(
+ runtime={
+ "credentials": credentials,
+ }
+ ).invoke(
+ user_id='',
+ tool_parameters={
+ "prompt": "cute girl, blue eyes, white hair, anime style",
+ "size": "1024x1024",
+ "n": 1
+ },
+ )
+ except Exception as e:
+ raise ToolProviderCredentialValidationError(str(e))
+
\ No newline at end of file
diff --git a/api/core/tools/provider/builtin/stepfun/stepfun.yaml b/api/core/tools/provider/builtin/stepfun/stepfun.yaml
new file mode 100644
index 00000000000000..1f841ec369b5c3
--- /dev/null
+++ b/api/core/tools/provider/builtin/stepfun/stepfun.yaml
@@ -0,0 +1,46 @@
+identity:
+ author: Stepfun
+ name: stepfun
+ label:
+ en_US: Image-1X
+ zh_Hans: 阶跃星辰绘画
+ pt_BR: Image-1X
+ description:
+ en_US: Image-1X
+ zh_Hans: 阶跃星辰绘画
+ pt_BR: Image-1X
+ icon: icon.png
+ tags:
+ - image
+ - productivity
+credentials_for_provider:
+ stepfun_api_key:
+ type: secret-input
+ required: true
+ label:
+ en_US: Stepfun API key
+      zh_Hans: 阶跃星辰 API key
+ pt_BR: Stepfun API key
+ help:
+ en_US: Please input your stepfun API key
+ zh_Hans: 请输入你的阶跃星辰 API key
+ pt_BR: Please input your stepfun API key
+ placeholder:
+ en_US: Please input your stepfun API key
+ zh_Hans: 请输入你的阶跃星辰 API key
+ pt_BR: Please input your stepfun API key
+ stepfun_base_url:
+ type: text-input
+ required: false
+ label:
+ en_US: Stepfun base URL
+ zh_Hans: 阶跃星辰 base URL
+ pt_BR: Stepfun base URL
+ help:
+ en_US: Please input your Stepfun base URL
+ zh_Hans: 请输入你的阶跃星辰 base URL
+ pt_BR: Please input your Stepfun base URL
+ placeholder:
+ en_US: Please input your Stepfun base URL
+ zh_Hans: 请输入你的阶跃星辰 base URL
+ pt_BR: Please input your Stepfun base URL
diff --git a/api/core/tools/provider/builtin/stepfun/tools/image.py b/api/core/tools/provider/builtin/stepfun/tools/image.py
new file mode 100644
index 00000000000000..5e544aada63b40
--- /dev/null
+++ b/api/core/tools/provider/builtin/stepfun/tools/image.py
@@ -0,0 +1,72 @@
+import random
+from typing import Any, Union
+
+from openai import OpenAI
+from yarl import URL
+
+from core.tools.entities.tool_entities import ToolInvokeMessage
+from core.tools.tool.builtin_tool import BuiltinTool
+
+
+class StepfunTool(BuiltinTool):
+ """ Stepfun Image Generation Tool """
+ def _invoke(self,
+ user_id: str,
+ tool_parameters: dict[str, Any],
+ ) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
+ """
+ invoke tools
+ """
+ base_url = self.runtime.credentials.get('stepfun_base_url', None)
+ if not base_url:
+ base_url = None
+ else:
+ base_url = str(URL(base_url) / 'v1')
+
+ client = OpenAI(
+ api_key=self.runtime.credentials['stepfun_api_key'],
+ base_url=base_url,
+ )
+
+ extra_body = {}
+ model = tool_parameters.get('model', 'step-1x-medium')
+ if not model:
+ return self.create_text_message('Please input model name')
+ # prompt
+ prompt = tool_parameters.get('prompt', '')
+ if not prompt:
+ return self.create_text_message('Please input prompt')
+
+ seed = tool_parameters.get('seed', 0)
+ if seed > 0:
+ extra_body['seed'] = seed
+ steps = tool_parameters.get('steps', 0)
+ if steps > 0:
+ extra_body['steps'] = steps
+ negative_prompt = tool_parameters.get('negative_prompt', '')
+ if negative_prompt:
+ extra_body['negative_prompt'] = negative_prompt
+
+ # call openapi stepfun model
+ response = client.images.generate(
+ prompt=prompt,
+ model=model,
+ size=tool_parameters.get('size', '1024x1024'),
+ n=tool_parameters.get('n', 1),
+ extra_body= extra_body
+ )
+ print(response)
+
+ result = []
+ for image in response.data:
+ result.append(self.create_image_message(image=image.url))
+ result.append(self.create_json_message({
+ "url": image.url,
+ }))
+ return result
+
+ @staticmethod
+ def _generate_random_id(length=8):
+ characters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
+ random_id = ''.join(random.choices(characters, k=length))
+ return random_id
diff --git a/api/core/tools/provider/builtin/stepfun/tools/image.yaml b/api/core/tools/provider/builtin/stepfun/tools/image.yaml
new file mode 100644
index 00000000000000..1e20b157aa131f
--- /dev/null
+++ b/api/core/tools/provider/builtin/stepfun/tools/image.yaml
@@ -0,0 +1,158 @@
+identity:
+ name: stepfun
+ author: Stepfun
+ label:
+ en_US: step-1x
+ zh_Hans: 阶跃星辰绘画
+ pt_BR: step-1x
+ description:
+ en_US: step-1x is a powerful drawing tool by stepfun, you can draw the image based on your prompt
+ zh_Hans: step-1x 系列是阶跃星辰提供的强大的绘画工具,它可以根据您的提示词绘制出您想要的图像。
+ pt_BR: step-1x is a powerful drawing tool by stepfun, you can draw the image based on your prompt
+description:
+ human:
+ en_US: step-1x is a text to image tool
+ zh_Hans: step-1x 是一个文本/图像到图像的工具
+ pt_BR: step-1x is a text to image tool
+ llm: step-1x is a tool used to generate images from text or image
+parameters:
+ - name: prompt
+ type: string
+ required: true
+ label:
+ en_US: Prompt
+ zh_Hans: 提示词
+ pt_BR: Prompt
+ human_description:
+ en_US: Image prompt, you can check the official documentation of step-1x
+ zh_Hans: 图像提示词,您可以查看step-1x 的官方文档
+ pt_BR: Image prompt, you can check the official documentation of step-1x
+    llm_description: Image prompt of step-1x, you should describe the image you want to generate as a list of words, as detailed as possible
+ form: llm
+ - name: model
+ type: select
+ required: false
+ human_description:
+ en_US: used for selecting the model name
+ zh_Hans: 用于选择模型的名字
+ pt_BR: used for selecting the model name
+ label:
+ en_US: Model Name
+ zh_Hans: 模型名字
+ pt_BR: Model Name
+ form: form
+ options:
+ - value: step-1x-turbo
+ label:
+ en_US: turbo
+ zh_Hans: turbo
+ pt_BR: turbo
+ - value: step-1x-medium
+ label:
+ en_US: medium
+ zh_Hans: medium
+ pt_BR: medium
+ - value: step-1x-large
+ label:
+ en_US: large
+ zh_Hans: large
+ pt_BR: large
+ default: step-1x-medium
+ - name: size
+ type: select
+ required: false
+ human_description:
+ en_US: used for selecting the image size
+ zh_Hans: 用于选择图像大小
+ pt_BR: used for selecting the image size
+ label:
+ en_US: Image size
+ zh_Hans: 图像大小
+ pt_BR: Image size
+ form: form
+ options:
+ - value: 256x256
+ label:
+ en_US: 256x256
+ zh_Hans: 256x256
+ pt_BR: 256x256
+ - value: 512x512
+ label:
+ en_US: 512x512
+ zh_Hans: 512x512
+ pt_BR: 512x512
+ - value: 768x768
+ label:
+ en_US: 768x768
+ zh_Hans: 768x768
+ pt_BR: 768x768
+ - value: 1024x1024
+ label:
+ en_US: 1024x1024
+ zh_Hans: 1024x1024
+ pt_BR: 1024x1024
+ - value: 1280x800
+ label:
+ en_US: 1280x800
+ zh_Hans: 1280x800
+ pt_BR: 1280x800
+ - value: 800x1280
+ label:
+ en_US: 800x1280
+ zh_Hans: 800x1280
+ pt_BR: 800x1280
+ default: 1024x1024
+ - name: n
+ type: number
+ required: true
+ human_description:
+ en_US: used for selecting the number of images
+ zh_Hans: 用于选择图像数量
+ pt_BR: used for selecting the number of images
+ label:
+ en_US: Number of images
+ zh_Hans: 图像数量
+ pt_BR: Number of images
+ form: form
+ default: 1
+ min: 1
+ max: 10
+ - name: seed
+ type: number
+ required: false
+ label:
+ en_US: seed
+ zh_Hans: seed
+ pt_BR: seed
+ human_description:
+ en_US: seed
+ zh_Hans: seed
+ pt_BR: seed
+ form: form
+ default: 10
+ - name: steps
+ type: number
+ required: false
+ label:
+ en_US: Steps
+ zh_Hans: Steps
+ pt_BR: Steps
+ human_description:
+ en_US: Steps
+ zh_Hans: Steps
+ pt_BR: Steps
+ form: form
+ default: 10
+ - name: negative_prompt
+ type: string
+ required: false
+ label:
+ en_US: Negative prompt
+ zh_Hans: Negative prompt
+ pt_BR: Negative prompt
+ human_description:
+ en_US: Negative prompt
+ zh_Hans: Negative prompt
+ pt_BR: Negative prompt
+ form: form
+ default: (worst quality:1.3), (nsfw), low quality
diff --git a/api/core/tools/tool/dataset_retriever/dataset_multi_retriever_tool.py b/api/core/tools/tool/dataset_retriever/dataset_multi_retriever_tool.py
index 1a0933af160365..7cb7c033bbe9f1 100644
--- a/api/core/tools/tool/dataset_retriever/dataset_multi_retriever_tool.py
+++ b/api/core/tools/tool/dataset_retriever/dataset_multi_retriever_tool.py
@@ -177,10 +177,12 @@ def _retriever(self, flask_app: Flask, dataset_id: str, query: str, all_document
dataset_id=dataset.id,
query=query,
top_k=self.top_k,
- score_threshold=retrieval_model['score_threshold']
+ score_threshold=retrieval_model.get('score_threshold', .0)
if retrieval_model['score_threshold_enabled'] else None,
- reranking_model=retrieval_model['reranking_model']
+ reranking_model=retrieval_model.get('reranking_model', None)
if retrieval_model['reranking_enable'] else None,
+ reranking_mode=retrieval_model.get('reranking_mode')
+ if retrieval_model.get('reranking_mode') else 'reranking_model',
weights=retrieval_model.get('weights', None),
)
diff --git a/api/core/tools/tool/dataset_retriever/dataset_retriever_tool.py b/api/core/tools/tool/dataset_retriever/dataset_retriever_tool.py
index 397ff7966e46f2..a7e70af6286544 100644
--- a/api/core/tools/tool/dataset_retriever/dataset_retriever_tool.py
+++ b/api/core/tools/tool/dataset_retriever/dataset_retriever_tool.py
@@ -14,6 +14,7 @@
'reranking_provider_name': '',
'reranking_model_name': ''
},
+ 'reranking_mode': 'reranking_model',
'top_k': 2,
'score_threshold_enabled': False
}
@@ -71,14 +72,16 @@ def _run(self, query: str) -> str:
else:
if self.top_k > 0:
# retrieval source
- documents = RetrievalService.retrieve(retrival_method=retrieval_model['search_method'],
+ documents = RetrievalService.retrieve(retrival_method=retrieval_model.get('search_method', 'semantic_search'),
dataset_id=dataset.id,
query=query,
top_k=self.top_k,
- score_threshold=retrieval_model['score_threshold']
+ score_threshold=retrieval_model.get('score_threshold', .0)
if retrieval_model['score_threshold_enabled'] else None,
- reranking_model=retrieval_model['reranking_model']
+ reranking_model=retrieval_model.get('reranking_model', None)
if retrieval_model['reranking_enable'] else None,
+ reranking_mode=retrieval_model.get('reranking_mode')
+ if retrieval_model.get('reranking_mode') else 'reranking_model',
weights=retrieval_model.get('weights', None),
)
else:
diff --git a/api/core/tools/tool/tool.py b/api/core/tools/tool/tool.py
index 5d561911d12564..d990131b5fbbfd 100644
--- a/api/core/tools/tool/tool.py
+++ b/api/core/tools/tool/tool.py
@@ -2,13 +2,12 @@
from collections.abc import Mapping
from copy import deepcopy
from enum import Enum
-from typing import Any, Optional, Union
+from typing import TYPE_CHECKING, Any, Optional, Union
from pydantic import BaseModel, ConfigDict, field_validator
from pydantic_core.core_schema import ValidationInfo
from core.app.entities.app_invoke_entities import InvokeFrom
-from core.file.file_obj import FileVar
from core.tools.entities.tool_entities import (
ToolDescription,
ToolIdentity,
@@ -23,6 +22,9 @@
from core.tools.tool_file_manager import ToolFileManager
from core.tools.utils.tool_parameter_converter import ToolParameterConverter
+if TYPE_CHECKING:
+ from core.file.file_obj import FileVar
+
class Tool(BaseModel, ABC):
identity: Optional[ToolIdentity] = None
@@ -76,7 +78,7 @@ def fork_tool_runtime(self, runtime: dict[str, Any]) -> 'Tool':
description=self.description.model_copy() if self.description else None,
runtime=Tool.Runtime(**runtime),
)
-
+
@abstractmethod
def tool_provider_type(self) -> ToolProviderType:
"""
@@ -84,7 +86,7 @@ def tool_provider_type(self) -> ToolProviderType:
:return: the tool provider type
"""
-
+
def load_variables(self, variables: ToolRuntimeVariablePool):
"""
load variables from database
@@ -99,7 +101,7 @@ def set_image_variable(self, variable_name: str, image_key: str) -> None:
"""
if not self.variables:
return
-
+
self.variables.set_file(self.identity.name, variable_name, image_key)
def set_text_variable(self, variable_name: str, text: str) -> None:
@@ -108,9 +110,9 @@ def set_text_variable(self, variable_name: str, text: str) -> None:
"""
if not self.variables:
return
-
+
self.variables.set_text(self.identity.name, variable_name, text)
-
+
def get_variable(self, name: Union[str, Enum]) -> Optional[ToolRuntimeVariable]:
"""
get a variable
@@ -120,14 +122,14 @@ def get_variable(self, name: Union[str, Enum]) -> Optional[ToolRuntimeVariable]:
"""
if not self.variables:
return None
-
+
if isinstance(name, Enum):
name = name.value
-
+
for variable in self.variables.pool:
if variable.name == name:
return variable
-
+
return None
def get_default_image_variable(self) -> Optional[ToolRuntimeVariable]:
@@ -138,9 +140,9 @@ def get_default_image_variable(self) -> Optional[ToolRuntimeVariable]:
"""
if not self.variables:
return None
-
+
return self.get_variable(self.VARIABLE_KEY.IMAGE)
-
+
def get_variable_file(self, name: Union[str, Enum]) -> Optional[bytes]:
"""
get a variable file
@@ -151,7 +153,7 @@ def get_variable_file(self, name: Union[str, Enum]) -> Optional[bytes]:
variable = self.get_variable(name)
if not variable:
return None
-
+
if not isinstance(variable, ToolRuntimeImageVariable):
return None
@@ -160,9 +162,9 @@ def get_variable_file(self, name: Union[str, Enum]) -> Optional[bytes]:
file_binary = ToolFileManager.get_file_binary_by_message_file_id(message_file_id)
if not file_binary:
return None
-
+
return file_binary[0]
-
+
def list_variables(self) -> list[ToolRuntimeVariable]:
"""
list all variables
@@ -171,9 +173,9 @@ def list_variables(self) -> list[ToolRuntimeVariable]:
"""
if not self.variables:
return []
-
+
return self.variables.pool
-
+
def list_default_image_variables(self) -> list[ToolRuntimeVariable]:
"""
list all image variables
@@ -182,9 +184,9 @@ def list_default_image_variables(self) -> list[ToolRuntimeVariable]:
"""
if not self.variables:
return []
-
+
result = []
-
+
for variable in self.variables.pool:
if variable.name.startswith(self.VARIABLE_KEY.IMAGE.value):
result.append(variable)
@@ -225,7 +227,7 @@ def _transform_tool_parameters_type(self, tool_parameters: Mapping[str, Any]) ->
@abstractmethod
def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
pass
-
+
def validate_credentials(self, credentials: dict[str, Any], parameters: dict[str, Any]) -> None:
"""
validate the credentials
@@ -244,7 +246,7 @@ def get_runtime_parameters(self) -> list[ToolParameter]:
:return: the runtime parameters
"""
return self.parameters or []
-
+
def get_all_runtime_parameters(self) -> list[ToolParameter]:
"""
get all runtime parameters
@@ -278,7 +280,7 @@ def get_all_runtime_parameters(self) -> list[ToolParameter]:
parameters.append(parameter)
return parameters
-
+
def create_image_message(self, image: str, save_as: str = '') -> ToolInvokeMessage:
"""
create an image message
@@ -286,18 +288,18 @@ def create_image_message(self, image: str, save_as: str = '') -> ToolInvokeMessa
:param image: the url of the image
:return: the image message
"""
- return ToolInvokeMessage(type=ToolInvokeMessage.MessageType.IMAGE,
- message=image,
+ return ToolInvokeMessage(type=ToolInvokeMessage.MessageType.IMAGE,
+ message=image,
save_as=save_as)
-
- def create_file_var_message(self, file_var: FileVar) -> ToolInvokeMessage:
+
+ def create_file_var_message(self, file_var: "FileVar") -> ToolInvokeMessage:
return ToolInvokeMessage(type=ToolInvokeMessage.MessageType.FILE_VAR,
message='',
meta={
'file_var': file_var
},
save_as='')
-
+
def create_link_message(self, link: str, save_as: str = '') -> ToolInvokeMessage:
"""
create a link message
@@ -305,10 +307,10 @@ def create_link_message(self, link: str, save_as: str = '') -> ToolInvokeMessage
:param link: the url of the link
:return: the link message
"""
- return ToolInvokeMessage(type=ToolInvokeMessage.MessageType.LINK,
- message=link,
+ return ToolInvokeMessage(type=ToolInvokeMessage.MessageType.LINK,
+ message=link,
save_as=save_as)
-
+
def create_text_message(self, text: str, save_as: str = '') -> ToolInvokeMessage:
"""
create a text message
@@ -321,7 +323,7 @@ def create_text_message(self, text: str, save_as: str = '') -> ToolInvokeMessage
message=text,
save_as=save_as
)
-
+
def create_blob_message(self, blob: bytes, meta: dict = None, save_as: str = '') -> ToolInvokeMessage:
"""
create a blob message
diff --git a/api/core/tools/tool/workflow_tool.py b/api/core/tools/tool/workflow_tool.py
index 071081303c3b2a..12e498e76d8cd5 100644
--- a/api/core/tools/tool/workflow_tool.py
+++ b/api/core/tools/tool/workflow_tool.py
@@ -72,6 +72,7 @@ def _invoke(
result.append(self.create_file_var_message(file))
result.append(self.create_text_message(json.dumps(outputs, ensure_ascii=False)))
+ result.append(self.create_json_message(outputs))
return result
diff --git a/api/core/tools/utils/message_transformer.py b/api/core/tools/utils/message_transformer.py
index ef9e5b67ae2ab6..564b9d3e14c15e 100644
--- a/api/core/tools/utils/message_transformer.py
+++ b/api/core/tools/utils/message_transformer.py
@@ -1,7 +1,7 @@
import logging
from mimetypes import guess_extension
-from core.file.file_obj import FileTransferMethod, FileType, FileVar
+from core.file.file_obj import FileTransferMethod, FileType
from core.tools.entities.tool_entities import ToolInvokeMessage
from core.tools.tool_file_manager import ToolFileManager
@@ -27,12 +27,12 @@ def transform_tool_invoke_messages(cls, messages: list[ToolInvokeMessage],
# try to download image
try:
file = ToolFileManager.create_file_by_url(
- user_id=user_id,
+ user_id=user_id,
tenant_id=tenant_id,
conversation_id=conversation_id,
file_url=message.message
)
-
+
url = f'/files/tools/{file.id}{guess_extension(file.mimetype) or ".png"}'
result.append(ToolInvokeMessage(
@@ -55,14 +55,14 @@ def transform_tool_invoke_messages(cls, messages: list[ToolInvokeMessage],
# if message is str, encode it to bytes
if isinstance(message.message, str):
message.message = message.message.encode('utf-8')
-
+
file = ToolFileManager.create_file_by_raw(
user_id=user_id, tenant_id=tenant_id,
conversation_id=conversation_id,
file_binary=message.message,
mimetype=mimetype
)
-
+
url = cls.get_tool_file_url(file.id, guess_extension(file.mimetype))
# check if file is image
@@ -81,7 +81,7 @@ def transform_tool_invoke_messages(cls, messages: list[ToolInvokeMessage],
meta=message.meta.copy() if message.meta is not None else {},
))
elif message.type == ToolInvokeMessage.MessageType.FILE_VAR:
- file_var: FileVar = message.meta.get('file_var')
+ file_var = message.meta.get('file_var')
if file_var:
if file_var.transfer_method == FileTransferMethod.TOOL_FILE:
url = cls.get_tool_file_url(file_var.related_id, file_var.extension)
@@ -103,7 +103,7 @@ def transform_tool_invoke_messages(cls, messages: list[ToolInvokeMessage],
result.append(message)
return result
-
+
@classmethod
def get_tool_file_url(cls, tool_file_id: str, extension: str) -> str:
- return f'/files/tools/{tool_file_id}{extension or ".bin"}'
\ No newline at end of file
+ return f'/files/tools/{tool_file_id}{extension or ".bin"}'
diff --git a/api/core/workflow/entities/node_entities.py b/api/core/workflow/entities/node_entities.py
index 996aae94c20e27..025453567bfc1b 100644
--- a/api/core/workflow/entities/node_entities.py
+++ b/api/core/workflow/entities/node_entities.py
@@ -4,13 +4,14 @@
from pydantic import BaseModel
-from models.workflow import WorkflowNodeExecutionStatus
+from models import WorkflowNodeExecutionStatus
class NodeType(Enum):
"""
Node Types.
"""
+
START = 'start'
END = 'end'
ANSWER = 'answer'
@@ -23,10 +24,12 @@ class NodeType(Enum):
HTTP_REQUEST = 'http-request'
TOOL = 'tool'
VARIABLE_AGGREGATOR = 'variable-aggregator'
+ # TODO: merge this into VARIABLE_AGGREGATOR
VARIABLE_ASSIGNER = 'variable-assigner'
LOOP = 'loop'
ITERATION = 'iteration'
PARAMETER_EXTRACTOR = 'parameter-extractor'
+ CONVERSATION_VARIABLE_ASSIGNER = 'assigner'
@classmethod
def value_of(cls, value: str) -> 'NodeType':
@@ -42,33 +45,11 @@ def value_of(cls, value: str) -> 'NodeType':
raise ValueError(f'invalid node type value {value}')
-class SystemVariable(Enum):
- """
- System Variables.
- """
- QUERY = 'query'
- FILES = 'files'
- CONVERSATION_ID = 'conversation_id'
- USER_ID = 'user_id'
-
- @classmethod
- def value_of(cls, value: str) -> 'SystemVariable':
- """
- Get value of given system variable.
-
- :param value: system variable value
- :return: system variable
- """
- for system_variable in cls:
- if system_variable.value == value:
- return system_variable
- raise ValueError(f'invalid system variable value {value}')
-
-
class NodeRunMetadataKey(Enum):
"""
Node Run Metadata Key.
"""
+
TOTAL_TOKENS = 'total_tokens'
TOTAL_PRICE = 'total_price'
CURRENCY = 'currency'
@@ -81,6 +62,7 @@ class NodeRunResult(BaseModel):
"""
Node Run Result.
"""
+
status: WorkflowNodeExecutionStatus = WorkflowNodeExecutionStatus.RUNNING
inputs: Optional[Mapping[str, Any]] = None # node inputs
diff --git a/api/core/workflow/entities/variable_pool.py b/api/core/workflow/entities/variable_pool.py
index a27b4261e486bc..9fe3356faa2ef5 100644
--- a/api/core/workflow/entities/variable_pool.py
+++ b/api/core/workflow/entities/variable_pool.py
@@ -6,13 +6,14 @@
from core.app.segments import Segment, Variable, factory
from core.file.file_obj import FileVar
-from core.workflow.entities.node_entities import SystemVariable
+from core.workflow.enums import SystemVariable
VariableValue = Union[str, int, float, dict, list, FileVar]
SYSTEM_VARIABLE_NODE_ID = 'sys'
ENVIRONMENT_VARIABLE_NODE_ID = 'env'
+CONVERSATION_VARIABLE_NODE_ID = 'conversation'
class VariablePool:
@@ -21,6 +22,7 @@ def __init__(
system_variables: Mapping[SystemVariable, Any],
user_inputs: Mapping[str, Any],
environment_variables: Sequence[Variable],
+ conversation_variables: Sequence[Variable] | None = None,
) -> None:
# system variables
# for example:
@@ -44,9 +46,13 @@ def __init__(
self.add((SYSTEM_VARIABLE_NODE_ID, key.value), value)
# Add environment variables to the variable pool
- for var in environment_variables or []:
+ for var in environment_variables:
self.add((ENVIRONMENT_VARIABLE_NODE_ID, var.name), var)
+ # Add conversation variables to the variable pool
+ for var in conversation_variables or []:
+ self.add((CONVERSATION_VARIABLE_NODE_ID, var.name), var)
+
def add(self, selector: Sequence[str], value: Any, /) -> None:
"""
Adds a variable to the variable pool.
diff --git a/api/core/workflow/enums.py b/api/core/workflow/enums.py
new file mode 100644
index 00000000000000..4757cf32f88988
--- /dev/null
+++ b/api/core/workflow/enums.py
@@ -0,0 +1,25 @@
+from enum import Enum
+
+
+class SystemVariable(str, Enum):
+ """
+ System Variables.
+ """
+ QUERY = 'query'
+ FILES = 'files'
+ CONVERSATION_ID = 'conversation_id'
+ USER_ID = 'user_id'
+ DIALOGUE_COUNT = 'dialogue_count'
+
+ @classmethod
+ def value_of(cls, value: str):
+ """
+ Get value of given system variable.
+
+ :param value: system variable value
+ :return: system variable
+ """
+ for system_variable in cls:
+ if system_variable.value == value:
+ return system_variable
+ raise ValueError(f'invalid system variable value {value}')
diff --git a/api/core/workflow/nodes/base_node.py b/api/core/workflow/nodes/base_node.py
index d8c812e7ef1244..3d9cf52771e146 100644
--- a/api/core/workflow/nodes/base_node.py
+++ b/api/core/workflow/nodes/base_node.py
@@ -8,6 +8,7 @@
from core.workflow.entities.base_node_data_entities import BaseIterationState, BaseNodeData
from core.workflow.entities.node_entities import NodeRunResult, NodeType
from core.workflow.entities.variable_pool import VariablePool
+from models import WorkflowNodeExecutionStatus
class UserFrom(Enum):
@@ -91,14 +92,19 @@ def run(self, variable_pool: VariablePool) -> NodeRunResult:
:param variable_pool: variable pool
:return:
"""
- result = self._run(
- variable_pool=variable_pool
- )
-
- self.node_run_result = result
- return result
-
- def publish_text_chunk(self, text: str, value_selector: list[str] = None) -> None:
+ try:
+ result = self._run(
+ variable_pool=variable_pool
+ )
+ self.node_run_result = result
+ return result
+ except Exception as e:
+ return NodeRunResult(
+ status=WorkflowNodeExecutionStatus.FAILED,
+ error=str(e),
+ )
+
+ def publish_text_chunk(self, text: str, value_selector: list[str] | None = None) -> None:
"""
Publish text chunk
:param text: chunk text
diff --git a/api/core/workflow/nodes/http_request/http_executor.py b/api/core/workflow/nodes/http_request/http_executor.py
index 3c24c0a018d2b9..db18bd00b2d9e1 100644
--- a/api/core/workflow/nodes/http_request/http_executor.py
+++ b/api/core/workflow/nodes/http_request/http_executor.py
@@ -337,7 +337,7 @@ def _format_template(
if variable is None:
raise ValueError(f'Variable {variable_selector.variable} not found')
if escape_quotes and isinstance(variable, str):
- value = variable.replace('"', '\\"')
+ value = variable.replace('"', '\\"').replace('\n', '\\n')
else:
value = variable
variable_value_mapping[variable_selector.variable] = value
diff --git a/api/core/workflow/nodes/http_request/http_request_node.py b/api/core/workflow/nodes/http_request/http_request_node.py
index bbe5f9ad43f561..1facf8a4f4a4b5 100644
--- a/api/core/workflow/nodes/http_request/http_request_node.py
+++ b/api/core/workflow/nodes/http_request/http_request_node.py
@@ -133,9 +133,6 @@ def extract_files(self, url: str, response: HttpExecutorResponse) -> list[FileVa
"""
files = []
mimetype, file_binary = response.extract_file()
- # if not image, return directly
- if 'image' not in mimetype:
- return files
if mimetype:
# extract filename from url
diff --git a/api/core/workflow/nodes/llm/llm_node.py b/api/core/workflow/nodes/llm/llm_node.py
index 4431259a57543b..c20e0d45062f51 100644
--- a/api/core/workflow/nodes/llm/llm_node.py
+++ b/api/core/workflow/nodes/llm/llm_node.py
@@ -1,14 +1,13 @@
import json
from collections.abc import Generator
from copy import deepcopy
-from typing import Optional, cast
+from typing import TYPE_CHECKING, Optional, cast
from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
from core.app.entities.queue_entities import QueueRetrieverResourcesEvent
from core.entities.model_entities import ModelStatus
from core.entities.provider_entities import QuotaUnit
from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
-from core.file.file_obj import FileVar
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_manager import ModelInstance, ModelManager
from core.model_runtime.entities.llm_entities import LLMUsage
@@ -23,8 +22,9 @@
from core.prompt.advanced_prompt_transform import AdvancedPromptTransform
from core.prompt.entities.advanced_prompt_entities import CompletionModelPromptTemplate, MemoryConfig
from core.prompt.utils.prompt_message_util import PromptMessageUtil
-from core.workflow.entities.node_entities import NodeRunMetadataKey, NodeRunResult, NodeType, SystemVariable
+from core.workflow.entities.node_entities import NodeRunMetadataKey, NodeRunResult, NodeType
from core.workflow.entities.variable_pool import VariablePool
+from core.workflow.enums import SystemVariable
from core.workflow.nodes.base_node import BaseNode
from core.workflow.nodes.llm.entities import (
LLMNodeChatModelMessage,
@@ -38,6 +38,10 @@
from models.provider import Provider, ProviderType
from models.workflow import WorkflowNodeExecutionStatus
+if TYPE_CHECKING:
+ from core.file.file_obj import FileVar
+
+
class LLMNode(BaseNode):
_node_data_cls = LLMNodeData
@@ -70,7 +74,7 @@ def _run(self, variable_pool: VariablePool) -> NodeRunResult:
node_inputs = {}
# fetch files
- files: list[FileVar] = self._fetch_files(node_data, variable_pool)
+ files = self._fetch_files(node_data, variable_pool)
if files:
node_inputs['#files#'] = [file.to_dict() for file in files]
@@ -201,8 +205,8 @@ def _handle_invoke_result(self, invoke_result: Generator) -> tuple[str, LLMUsage
usage = LLMUsage.empty_usage()
return full_text, usage
-
- def _transform_chat_messages(self,
+
+ def _transform_chat_messages(self,
messages: list[LLMNodeChatModelMessage] | LLMNodeCompletionModelPromptTemplate
) -> list[LLMNodeChatModelMessage] | LLMNodeCompletionModelPromptTemplate:
"""
@@ -249,13 +253,13 @@ def parse_dict(d: dict) -> str:
# check if it's a context structure
if 'metadata' in d and '_source' in d['metadata'] and 'content' in d:
return d['content']
-
+
# else, parse the dict
try:
return json.dumps(d, ensure_ascii=False)
except Exception:
return str(d)
-
+
if isinstance(value, str):
value = value
elif isinstance(value, list):
@@ -321,7 +325,7 @@ def _fetch_inputs(self, node_data: LLMNodeData, variable_pool: VariablePool) ->
return inputs
- def _fetch_files(self, node_data: LLMNodeData, variable_pool: VariablePool) -> list[FileVar]:
+ def _fetch_files(self, node_data: LLMNodeData, variable_pool: VariablePool) -> list["FileVar"]:
"""
Fetch files
:param node_data: node data
@@ -520,7 +524,7 @@ def _fetch_prompt_messages(self, node_data: LLMNodeData,
query: Optional[str],
query_prompt_template: Optional[str],
inputs: dict[str, str],
- files: list[FileVar],
+ files: list["FileVar"],
context: Optional[str],
memory: Optional[TokenBufferMemory],
model_config: ModelConfigWithCredentialsEntity) \
diff --git a/api/core/workflow/nodes/tool/tool_node.py b/api/core/workflow/nodes/tool/tool_node.py
index 969b1c241e52a8..554e3b6074ed58 100644
--- a/api/core/workflow/nodes/tool/tool_node.py
+++ b/api/core/workflow/nodes/tool/tool_node.py
@@ -2,19 +2,20 @@
from os import path
from typing import Any, cast
-from core.app.segments import parser
+from core.app.segments import ArrayAnyVariable, parser
from core.callback_handler.workflow_tool_callback_handler import DifyWorkflowCallbackHandler
from core.file.file_obj import FileTransferMethod, FileType, FileVar
from core.tools.entities.tool_entities import ToolInvokeMessage, ToolParameter
from core.tools.tool_engine import ToolEngine
from core.tools.tool_manager import ToolManager
from core.tools.utils.message_transformer import ToolFileMessageTransformer
-from core.workflow.entities.node_entities import NodeRunMetadataKey, NodeRunResult, NodeType, SystemVariable
+from core.workflow.entities.node_entities import NodeRunMetadataKey, NodeRunResult, NodeType
from core.workflow.entities.variable_pool import VariablePool
+from core.workflow.enums import SystemVariable
from core.workflow.nodes.base_node import BaseNode
from core.workflow.nodes.tool.entities import ToolNodeData
from core.workflow.utils.variable_template_parser import VariableTemplateParser
-from models.workflow import WorkflowNodeExecutionStatus
+from models import WorkflowNodeExecutionStatus
class ToolNode(BaseNode):
@@ -118,6 +119,7 @@ def _generate_parameters(
for parameter_name in node_data.tool_parameters:
parameter = tool_parameters_dictionary.get(parameter_name)
if not parameter:
+ result[parameter_name] = None
continue
if parameter.type == ToolParameter.ToolParameterType.FILE:
result[parameter_name] = [
@@ -139,9 +141,9 @@ def _generate_parameters(
return result
def _fetch_files(self, variable_pool: VariablePool) -> list[FileVar]:
- # FIXME: ensure this is a ArrayVariable contains FileVariable.
variable = variable_pool.get(['sys', SystemVariable.FILES.value])
- return [file_var.value for file_var in variable.value] if variable else []
+ assert isinstance(variable, ArrayAnyVariable)
+ return list(variable.value) if variable else []
def _convert_tool_messages(self, messages: list[ToolInvokeMessage]):
"""
diff --git a/api/core/workflow/nodes/variable_assigner/__init__.py b/api/core/workflow/nodes/variable_assigner/__init__.py
new file mode 100644
index 00000000000000..552cc367f2674f
--- /dev/null
+++ b/api/core/workflow/nodes/variable_assigner/__init__.py
@@ -0,0 +1,109 @@
+from collections.abc import Sequence
+from enum import Enum
+from typing import Optional, cast
+
+from sqlalchemy import select
+from sqlalchemy.orm import Session
+
+from core.app.segments import SegmentType, Variable, factory
+from core.workflow.entities.base_node_data_entities import BaseNodeData
+from core.workflow.entities.node_entities import NodeRunResult, NodeType
+from core.workflow.entities.variable_pool import VariablePool
+from core.workflow.nodes.base_node import BaseNode
+from extensions.ext_database import db
+from models import ConversationVariable, WorkflowNodeExecutionStatus
+
+
+class VariableAssignerNodeError(Exception):
+ pass
+
+
+class WriteMode(str, Enum):
+ OVER_WRITE = 'over-write'
+ APPEND = 'append'
+ CLEAR = 'clear'
+
+
+class VariableAssignerData(BaseNodeData):
+ title: str = 'Variable Assigner'
+ desc: Optional[str] = 'Assign a value to a variable'
+ assigned_variable_selector: Sequence[str]
+ write_mode: WriteMode
+ input_variable_selector: Sequence[str]
+
+
+class VariableAssignerNode(BaseNode):
+ _node_data_cls: type[BaseNodeData] = VariableAssignerData
+ _node_type: NodeType = NodeType.CONVERSATION_VARIABLE_ASSIGNER
+
+ def _run(self, variable_pool: VariablePool) -> NodeRunResult:
+ data = cast(VariableAssignerData, self.node_data)
+
+ # Should be String, Number, Object, ArrayString, ArrayNumber, ArrayObject
+ original_variable = variable_pool.get(data.assigned_variable_selector)
+ if not isinstance(original_variable, Variable):
+ raise VariableAssignerNodeError('assigned variable not found')
+
+ match data.write_mode:
+ case WriteMode.OVER_WRITE:
+ income_value = variable_pool.get(data.input_variable_selector)
+ if not income_value:
+ raise VariableAssignerNodeError('input value not found')
+ updated_variable = original_variable.model_copy(update={'value': income_value.value})
+
+ case WriteMode.APPEND:
+ income_value = variable_pool.get(data.input_variable_selector)
+ if not income_value:
+ raise VariableAssignerNodeError('input value not found')
+ updated_value = original_variable.value + [income_value.value]
+ updated_variable = original_variable.model_copy(update={'value': updated_value})
+
+ case WriteMode.CLEAR:
+ income_value = get_zero_value(original_variable.value_type)
+ updated_variable = original_variable.model_copy(update={'value': income_value.to_object()})
+
+ case _:
+ raise VariableAssignerNodeError(f'unsupported write mode: {data.write_mode}')
+
+ # Over write the variable.
+ variable_pool.add(data.assigned_variable_selector, updated_variable)
+
+ # Update conversation variable.
+ # TODO: Find a better way to use the database.
+ conversation_id = variable_pool.get(['sys', 'conversation_id'])
+ if not conversation_id:
+ raise VariableAssignerNodeError('conversation_id not found')
+ update_conversation_variable(conversation_id=conversation_id.text, variable=updated_variable)
+
+ return NodeRunResult(
+ status=WorkflowNodeExecutionStatus.SUCCEEDED,
+ inputs={
+ 'value': income_value.to_object(),
+ },
+ )
+
+
+def update_conversation_variable(conversation_id: str, variable: Variable):
+ stmt = select(ConversationVariable).where(
+ ConversationVariable.id == variable.id, ConversationVariable.conversation_id == conversation_id
+ )
+ with Session(db.engine) as session:
+ row = session.scalar(stmt)
+ if not row:
+ raise VariableAssignerNodeError('conversation variable not found in the database')
+ row.data = variable.model_dump_json()
+ session.commit()
+
+
+def get_zero_value(t: SegmentType):
+ match t:
+ case SegmentType.ARRAY_OBJECT | SegmentType.ARRAY_STRING | SegmentType.ARRAY_NUMBER:
+ return factory.build_segment([])
+ case SegmentType.OBJECT:
+ return factory.build_segment({})
+ case SegmentType.STRING:
+ return factory.build_segment('')
+ case SegmentType.NUMBER:
+ return factory.build_segment(0)
+ case _:
+ raise VariableAssignerNodeError(f'unsupported variable type: {t}')
diff --git a/api/core/workflow/workflow_engine_manager.py b/api/core/workflow/workflow_engine_manager.py
index bd2b3eafa7a8b1..3157eedfee5238 100644
--- a/api/core/workflow/workflow_engine_manager.py
+++ b/api/core/workflow/workflow_engine_manager.py
@@ -3,13 +3,13 @@
from collections.abc import Mapping, Sequence
from typing import Any, Optional, cast
+import contexts
from configs import dify_config
-from core.app.app_config.entities import FileExtraConfig
from core.app.apps.base_app_queue_manager import GenerateTaskStoppedException
from core.app.entities.app_invoke_entities import InvokeFrom
-from core.file.file_obj import FileTransferMethod, FileType, FileVar
+from core.file.file_obj import FileExtraConfig, FileTransferMethod, FileType, FileVar
from core.workflow.callbacks.base_workflow_callback import WorkflowCallback
-from core.workflow.entities.node_entities import NodeRunMetadataKey, NodeRunResult, NodeType, SystemVariable
+from core.workflow.entities.node_entities import NodeRunMetadataKey, NodeRunResult, NodeType
from core.workflow.entities.variable_pool import VariablePool, VariableValue
from core.workflow.entities.workflow_entities import WorkflowNodeAndResult, WorkflowRunState
from core.workflow.errors import WorkflowNodeRunFailedError
@@ -30,6 +30,7 @@
from core.workflow.nodes.template_transform.template_transform_node import TemplateTransformNode
from core.workflow.nodes.tool.tool_node import ToolNode
from core.workflow.nodes.variable_aggregator.variable_aggregator_node import VariableAggregatorNode
+from core.workflow.nodes.variable_assigner import VariableAssignerNode
from extensions.ext_database import db
from models.workflow import (
Workflow,
@@ -51,7 +52,8 @@
NodeType.VARIABLE_AGGREGATOR: VariableAggregatorNode,
NodeType.VARIABLE_ASSIGNER: VariableAggregatorNode,
NodeType.ITERATION: IterationNode,
- NodeType.PARAMETER_EXTRACTOR: ParameterExtractorNode
+ NodeType.PARAMETER_EXTRACTOR: ParameterExtractorNode,
+ NodeType.CONVERSATION_VARIABLE_ASSIGNER: VariableAssignerNode,
}
logger = logging.getLogger(__name__)
@@ -94,19 +96,18 @@ def run_workflow(
user_id: str,
user_from: UserFrom,
invoke_from: InvokeFrom,
- user_inputs: Mapping[str, Any],
- system_inputs: Mapping[SystemVariable, Any],
callbacks: Sequence[WorkflowCallback],
- call_depth: int = 0
+ call_depth: int = 0,
+ variable_pool: VariablePool | None = None,
) -> None:
"""
:param workflow: Workflow instance
:param user_id: user id
:param user_from: user from
- :param user_inputs: user variables inputs
- :param system_inputs: system inputs, like: query, files
+ :param invoke_from: invoke from
:param callbacks: workflow callbacks
:param call_depth: call depth
+ :param variable_pool: variable pool
"""
# fetch workflow graph
graph = workflow.graph_dict
@@ -122,18 +123,14 @@ def run_workflow(
if not isinstance(graph.get('edges'), list):
raise ValueError('edges in workflow graph must be a list')
- # init variable pool
- variable_pool = VariablePool(
- system_variables=system_inputs,
- user_inputs=user_inputs,
- environment_variables=workflow.environment_variables,
- )
workflow_call_max_depth = dify_config.WORKFLOW_CALL_MAX_DEPTH
if call_depth > workflow_call_max_depth:
raise ValueError('Max workflow call depth {} reached.'.format(workflow_call_max_depth))
# init workflow run state
+ if not variable_pool:
+ variable_pool = contexts.workflow_variable_pool.get()
workflow_run_state = WorkflowRunState(
workflow=workflow,
start_at=time.perf_counter(),
@@ -403,6 +400,7 @@ def single_step_run_workflow_node(self, workflow: Workflow,
system_variables={},
user_inputs={},
environment_variables=workflow.environment_variables,
+ conversation_variables=workflow.conversation_variables,
)
if node_cls is None:
@@ -468,6 +466,7 @@ def single_step_run_iteration_workflow_node(self, workflow: Workflow,
system_variables={},
user_inputs={},
environment_variables=workflow.environment_variables,
+ conversation_variables=workflow.conversation_variables,
)
# variable selector to variable mapping
diff --git a/api/events/app_event.py b/api/events/app_event.py
index 67a5982527f7b6..f2ce71bbbb3632 100644
--- a/api/events/app_event.py
+++ b/api/events/app_event.py
@@ -1,13 +1,13 @@
from blinker import signal
# sender: app
-app_was_created = signal('app-was-created')
+app_was_created = signal("app-was-created")
# sender: app, kwargs: app_model_config
-app_model_config_was_updated = signal('app-model-config-was-updated')
+app_model_config_was_updated = signal("app-model-config-was-updated")
# sender: app, kwargs: published_workflow
-app_published_workflow_was_updated = signal('app-published-workflow-was-updated')
+app_published_workflow_was_updated = signal("app-published-workflow-was-updated")
# sender: app, kwargs: synced_draft_workflow
-app_draft_workflow_was_synced = signal('app-draft-workflow-was-synced')
+app_draft_workflow_was_synced = signal("app-draft-workflow-was-synced")
diff --git a/api/events/dataset_event.py b/api/events/dataset_event.py
index d4a2b6f313c13a..750b7424e2347b 100644
--- a/api/events/dataset_event.py
+++ b/api/events/dataset_event.py
@@ -1,4 +1,4 @@
from blinker import signal
# sender: dataset
-dataset_was_deleted = signal('dataset-was-deleted')
+dataset_was_deleted = signal("dataset-was-deleted")
diff --git a/api/events/document_event.py b/api/events/document_event.py
index f95326630b2b7a..2c5a416a5e0c91 100644
--- a/api/events/document_event.py
+++ b/api/events/document_event.py
@@ -1,4 +1,4 @@
from blinker import signal
# sender: document
-document_was_deleted = signal('document-was-deleted')
+document_was_deleted = signal("document-was-deleted")
diff --git a/api/events/event_handlers/clean_when_dataset_deleted.py b/api/events/event_handlers/clean_when_dataset_deleted.py
index 42f1c70614c49a..7caa2d1cc9f3f2 100644
--- a/api/events/event_handlers/clean_when_dataset_deleted.py
+++ b/api/events/event_handlers/clean_when_dataset_deleted.py
@@ -5,5 +5,11 @@
@dataset_was_deleted.connect
def handle(sender, **kwargs):
dataset = sender
- clean_dataset_task.delay(dataset.id, dataset.tenant_id, dataset.indexing_technique,
- dataset.index_struct, dataset.collection_binding_id, dataset.doc_form)
+ clean_dataset_task.delay(
+ dataset.id,
+ dataset.tenant_id,
+ dataset.indexing_technique,
+ dataset.index_struct,
+ dataset.collection_binding_id,
+ dataset.doc_form,
+ )
diff --git a/api/events/event_handlers/clean_when_document_deleted.py b/api/events/event_handlers/clean_when_document_deleted.py
index 24022da15f81ee..00a66f50ad9319 100644
--- a/api/events/event_handlers/clean_when_document_deleted.py
+++ b/api/events/event_handlers/clean_when_document_deleted.py
@@ -5,7 +5,7 @@
@document_was_deleted.connect
def handle(sender, **kwargs):
document_id = sender
- dataset_id = kwargs.get('dataset_id')
- doc_form = kwargs.get('doc_form')
- file_id = kwargs.get('file_id')
+ dataset_id = kwargs.get("dataset_id")
+ doc_form = kwargs.get("doc_form")
+ file_id = kwargs.get("file_id")
clean_document_task.delay(document_id, dataset_id, doc_form, file_id)
diff --git a/api/events/event_handlers/create_document_index.py b/api/events/event_handlers/create_document_index.py
index 68dae5a5537cd7..72a135e73d4ca5 100644
--- a/api/events/event_handlers/create_document_index.py
+++ b/api/events/event_handlers/create_document_index.py
@@ -14,21 +14,25 @@
@document_index_created.connect
def handle(sender, **kwargs):
dataset_id = sender
- document_ids = kwargs.get('document_ids', None)
+ document_ids = kwargs.get("document_ids", None)
documents = []
start_at = time.perf_counter()
for document_id in document_ids:
- logging.info(click.style('Start process document: {}'.format(document_id), fg='green'))
+ logging.info(click.style("Start process document: {}".format(document_id), fg="green"))
- document = db.session.query(Document).filter(
- Document.id == document_id,
- Document.dataset_id == dataset_id
- ).first()
+ document = (
+ db.session.query(Document)
+ .filter(
+ Document.id == document_id,
+ Document.dataset_id == dataset_id,
+ )
+ .first()
+ )
if not document:
- raise NotFound('Document not found')
+ raise NotFound("Document not found")
- document.indexing_status = 'parsing'
+ document.indexing_status = "parsing"
document.processing_started_at = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None)
documents.append(document)
db.session.add(document)
@@ -38,8 +42,8 @@ def handle(sender, **kwargs):
indexing_runner = IndexingRunner()
indexing_runner.run(documents)
end_at = time.perf_counter()
- logging.info(click.style('Processed dataset: {} latency: {}'.format(dataset_id, end_at - start_at), fg='green'))
+ logging.info(click.style("Processed dataset: {} latency: {}".format(dataset_id, end_at - start_at), fg="green"))
except DocumentIsPausedException as ex:
- logging.info(click.style(str(ex), fg='yellow'))
+ logging.info(click.style(str(ex), fg="yellow"))
except Exception:
pass
diff --git a/api/events/event_handlers/create_installed_app_when_app_created.py b/api/events/event_handlers/create_installed_app_when_app_created.py
index 31084ce0fe8bdc..57412cc4ad0af2 100644
--- a/api/events/event_handlers/create_installed_app_when_app_created.py
+++ b/api/events/event_handlers/create_installed_app_when_app_created.py
@@ -10,7 +10,7 @@ def handle(sender, **kwargs):
installed_app = InstalledApp(
tenant_id=app.tenant_id,
app_id=app.id,
- app_owner_tenant_id=app.tenant_id
+ app_owner_tenant_id=app.tenant_id,
)
db.session.add(installed_app)
db.session.commit()
diff --git a/api/events/event_handlers/create_site_record_when_app_created.py b/api/events/event_handlers/create_site_record_when_app_created.py
index f0eb7159b631e0..abaf0e41ec30e6 100644
--- a/api/events/event_handlers/create_site_record_when_app_created.py
+++ b/api/events/event_handlers/create_site_record_when_app_created.py
@@ -7,15 +7,15 @@
def handle(sender, **kwargs):
"""Create site record when an app is created."""
app = sender
- account = kwargs.get('account')
+ account = kwargs.get("account")
site = Site(
app_id=app.id,
title=app.name,
- icon = app.icon,
- icon_background = app.icon_background,
+ icon=app.icon,
+ icon_background=app.icon_background,
default_language=account.interface_language,
- customize_token_strategy='not_allow',
- code=Site.generate_code(16)
+ customize_token_strategy="not_allow",
+ code=Site.generate_code(16),
)
db.session.add(site)
diff --git a/api/events/event_handlers/deduct_quota_when_messaeg_created.py b/api/events/event_handlers/deduct_quota_when_messaeg_created.py
index 8cf52bf8f5d0b8..843a2320968ced 100644
--- a/api/events/event_handlers/deduct_quota_when_messaeg_created.py
+++ b/api/events/event_handlers/deduct_quota_when_messaeg_created.py
@@ -8,7 +8,7 @@
@message_was_created.connect
def handle(sender, **kwargs):
message = sender
- application_generate_entity = kwargs.get('application_generate_entity')
+ application_generate_entity = kwargs.get("application_generate_entity")
if not isinstance(application_generate_entity, ChatAppGenerateEntity | AgentChatAppGenerateEntity):
return
@@ -39,7 +39,7 @@ def handle(sender, **kwargs):
elif quota_unit == QuotaUnit.CREDITS:
used_quota = 1
- if 'gpt-4' in model_config.model:
+ if "gpt-4" in model_config.model:
used_quota = 20
else:
used_quota = 1
@@ -50,6 +50,6 @@ def handle(sender, **kwargs):
Provider.provider_name == model_config.provider,
Provider.provider_type == ProviderType.SYSTEM.value,
Provider.quota_type == system_configuration.current_quota_type.value,
- Provider.quota_limit > Provider.quota_used
- ).update({'quota_used': Provider.quota_used + used_quota})
+ Provider.quota_limit > Provider.quota_used,
+ ).update({"quota_used": Provider.quota_used + used_quota})
db.session.commit()
diff --git a/api/events/event_handlers/delete_tool_parameters_cache_when_sync_draft_workflow.py b/api/events/event_handlers/delete_tool_parameters_cache_when_sync_draft_workflow.py
index 1f6da34ee24d56..f96bb5ef74b62e 100644
--- a/api/events/event_handlers/delete_tool_parameters_cache_when_sync_draft_workflow.py
+++ b/api/events/event_handlers/delete_tool_parameters_cache_when_sync_draft_workflow.py
@@ -8,8 +8,8 @@
@app_draft_workflow_was_synced.connect
def handle(sender, **kwargs):
app = sender
- for node_data in kwargs.get('synced_draft_workflow').graph_dict.get('nodes', []):
- if node_data.get('data', {}).get('type') == NodeType.TOOL.value:
+ for node_data in kwargs.get("synced_draft_workflow").graph_dict.get("nodes", []):
+ if node_data.get("data", {}).get("type") == NodeType.TOOL.value:
try:
tool_entity = ToolEntity(**node_data["data"])
tool_runtime = ToolManager.get_tool_runtime(
@@ -23,7 +23,7 @@ def handle(sender, **kwargs):
tool_runtime=tool_runtime,
provider_name=tool_entity.provider_name,
provider_type=tool_entity.provider_type,
- identity_id=f'WORKFLOW.{app.id}.{node_data.get("id")}'
+ identity_id=f'WORKFLOW.{app.id}.{node_data.get("id")}',
)
manager.delete_tool_parameters_cache()
except:
diff --git a/api/events/event_handlers/document_index_event.py b/api/events/event_handlers/document_index_event.py
index 9c4e055debdd9e..3d463fe5b35acf 100644
--- a/api/events/event_handlers/document_index_event.py
+++ b/api/events/event_handlers/document_index_event.py
@@ -1,4 +1,4 @@
from blinker import signal
# sender: document
-document_index_created = signal('document-index-created')
+document_index_created = signal("document-index-created")
diff --git a/api/events/event_handlers/update_app_dataset_join_when_app_model_config_updated.py b/api/events/event_handlers/update_app_dataset_join_when_app_model_config_updated.py
index 2b202c53d0b883..59375b1a0b1a81 100644
--- a/api/events/event_handlers/update_app_dataset_join_when_app_model_config_updated.py
+++ b/api/events/event_handlers/update_app_dataset_join_when_app_model_config_updated.py
@@ -7,13 +7,11 @@
@app_model_config_was_updated.connect
def handle(sender, **kwargs):
app = sender
- app_model_config = kwargs.get('app_model_config')
+ app_model_config = kwargs.get("app_model_config")
dataset_ids = get_dataset_ids_from_model_config(app_model_config)
- app_dataset_joins = db.session.query(AppDatasetJoin).filter(
- AppDatasetJoin.app_id == app.id
- ).all()
+ app_dataset_joins = db.session.query(AppDatasetJoin).filter(AppDatasetJoin.app_id == app.id).all()
removed_dataset_ids = []
if not app_dataset_joins:
@@ -29,16 +27,12 @@ def handle(sender, **kwargs):
if removed_dataset_ids:
for dataset_id in removed_dataset_ids:
db.session.query(AppDatasetJoin).filter(
- AppDatasetJoin.app_id == app.id,
- AppDatasetJoin.dataset_id == dataset_id
+ AppDatasetJoin.app_id == app.id, AppDatasetJoin.dataset_id == dataset_id
).delete()
if added_dataset_ids:
for dataset_id in added_dataset_ids:
- app_dataset_join = AppDatasetJoin(
- app_id=app.id,
- dataset_id=dataset_id
- )
+ app_dataset_join = AppDatasetJoin(app_id=app.id, dataset_id=dataset_id)
db.session.add(app_dataset_join)
db.session.commit()
@@ -51,7 +45,7 @@ def get_dataset_ids_from_model_config(app_model_config: AppModelConfig) -> set:
agent_mode = app_model_config.agent_mode_dict
- tools = agent_mode.get('tools', []) or []
+ tools = agent_mode.get("tools", []) or []
for tool in tools:
if len(list(tool.keys())) != 1:
continue
@@ -63,11 +57,11 @@ def get_dataset_ids_from_model_config(app_model_config: AppModelConfig) -> set:
# get dataset from dataset_configs
dataset_configs = app_model_config.dataset_configs_dict
- datasets = dataset_configs.get('datasets', {}) or {}
- for dataset in datasets.get('datasets', []) or []:
+ datasets = dataset_configs.get("datasets", {}) or {}
+ for dataset in datasets.get("datasets", []) or []:
keys = list(dataset.keys())
- if len(keys) == 1 and keys[0] == 'dataset':
- if dataset['dataset'].get('id'):
- dataset_ids.add(dataset['dataset'].get('id'))
+ if len(keys) == 1 and keys[0] == "dataset":
+ if dataset["dataset"].get("id"):
+ dataset_ids.add(dataset["dataset"].get("id"))
return dataset_ids
diff --git a/api/events/event_handlers/update_app_dataset_join_when_app_published_workflow_updated.py b/api/events/event_handlers/update_app_dataset_join_when_app_published_workflow_updated.py
index 996b1e96910b93..333b85ecb2907a 100644
--- a/api/events/event_handlers/update_app_dataset_join_when_app_published_workflow_updated.py
+++ b/api/events/event_handlers/update_app_dataset_join_when_app_published_workflow_updated.py
@@ -11,13 +11,11 @@
@app_published_workflow_was_updated.connect
def handle(sender, **kwargs):
app = sender
- published_workflow = kwargs.get('published_workflow')
+ published_workflow = kwargs.get("published_workflow")
published_workflow = cast(Workflow, published_workflow)
dataset_ids = get_dataset_ids_from_workflow(published_workflow)
- app_dataset_joins = db.session.query(AppDatasetJoin).filter(
- AppDatasetJoin.app_id == app.id
- ).all()
+ app_dataset_joins = db.session.query(AppDatasetJoin).filter(AppDatasetJoin.app_id == app.id).all()
removed_dataset_ids = []
if not app_dataset_joins:
@@ -33,16 +31,12 @@ def handle(sender, **kwargs):
if removed_dataset_ids:
for dataset_id in removed_dataset_ids:
db.session.query(AppDatasetJoin).filter(
- AppDatasetJoin.app_id == app.id,
- AppDatasetJoin.dataset_id == dataset_id
+ AppDatasetJoin.app_id == app.id, AppDatasetJoin.dataset_id == dataset_id
).delete()
if added_dataset_ids:
for dataset_id in added_dataset_ids:
- app_dataset_join = AppDatasetJoin(
- app_id=app.id,
- dataset_id=dataset_id
- )
+ app_dataset_join = AppDatasetJoin(app_id=app.id, dataset_id=dataset_id)
db.session.add(app_dataset_join)
db.session.commit()
@@ -54,18 +48,19 @@ def get_dataset_ids_from_workflow(published_workflow: Workflow) -> set:
if not graph:
return dataset_ids
- nodes = graph.get('nodes', [])
+ nodes = graph.get("nodes", [])
# fetch all knowledge retrieval nodes
- knowledge_retrieval_nodes = [node for node in nodes
- if node.get('data', {}).get('type') == NodeType.KNOWLEDGE_RETRIEVAL.value]
+ knowledge_retrieval_nodes = [
+ node for node in nodes if node.get("data", {}).get("type") == NodeType.KNOWLEDGE_RETRIEVAL.value
+ ]
if not knowledge_retrieval_nodes:
return dataset_ids
for node in knowledge_retrieval_nodes:
try:
- node_data = KnowledgeRetrievalNodeData(**node.get('data', {}))
+ node_data = KnowledgeRetrievalNodeData(**node.get("data", {}))
dataset_ids.update(node_data.dataset_ids)
except Exception as e:
continue
diff --git a/api/events/event_handlers/update_provider_last_used_at_when_messaeg_created.py b/api/events/event_handlers/update_provider_last_used_at_when_messaeg_created.py
index 6188f1a0850ac4..a80572c0debb1a 100644
--- a/api/events/event_handlers/update_provider_last_used_at_when_messaeg_created.py
+++ b/api/events/event_handlers/update_provider_last_used_at_when_messaeg_created.py
@@ -9,13 +9,13 @@
@message_was_created.connect
def handle(sender, **kwargs):
message = sender
- application_generate_entity = kwargs.get('application_generate_entity')
+ application_generate_entity = kwargs.get("application_generate_entity")
if not isinstance(application_generate_entity, ChatAppGenerateEntity | AgentChatAppGenerateEntity):
return
db.session.query(Provider).filter(
Provider.tenant_id == application_generate_entity.app_config.tenant_id,
- Provider.provider_name == application_generate_entity.model_conf.provider
- ).update({'last_used': datetime.now(timezone.utc).replace(tzinfo=None)})
+ Provider.provider_name == application_generate_entity.model_conf.provider,
+ ).update({"last_used": datetime.now(timezone.utc).replace(tzinfo=None)})
db.session.commit()
diff --git a/api/events/message_event.py b/api/events/message_event.py
index 21da83f2496af5..6576c35c453c95 100644
--- a/api/events/message_event.py
+++ b/api/events/message_event.py
@@ -1,4 +1,4 @@
from blinker import signal
# sender: message, kwargs: conversation
-message_was_created = signal('message-was-created')
+message_was_created = signal("message-was-created")
diff --git a/api/events/tenant_event.py b/api/events/tenant_event.py
index 942f709917bf61..d99feaac40896d 100644
--- a/api/events/tenant_event.py
+++ b/api/events/tenant_event.py
@@ -1,7 +1,7 @@
from blinker import signal
# sender: tenant
-tenant_was_created = signal('tenant-was-created')
+tenant_was_created = signal("tenant-was-created")
# sender: tenant
-tenant_was_updated = signal('tenant-was-updated')
+tenant_was_updated = signal("tenant-was-updated")
diff --git a/api/extensions/ext_celery.py b/api/extensions/ext_celery.py
index ae9a07534084e7..f5ec7c1759cb9d 100644
--- a/api/extensions/ext_celery.py
+++ b/api/extensions/ext_celery.py
@@ -17,7 +17,7 @@ def __call__(self, *args: object, **kwargs: object) -> object:
backend=app.config["CELERY_BACKEND"],
task_ignore_result=True,
)
-
+
# Add SSL options to the Celery configuration
ssl_options = {
"ssl_cert_reqs": None,
@@ -35,7 +35,7 @@ def __call__(self, *args: object, **kwargs: object) -> object:
celery_app.conf.update(
broker_use_ssl=ssl_options, # Add the SSL options to the broker configuration
)
-
+
celery_app.set_default()
app.extensions["celery"] = celery_app
@@ -45,18 +45,15 @@ def __call__(self, *args: object, **kwargs: object) -> object:
]
day = app.config["CELERY_BEAT_SCHEDULER_TIME"]
beat_schedule = {
- 'clean_embedding_cache_task': {
- 'task': 'schedule.clean_embedding_cache_task.clean_embedding_cache_task',
- 'schedule': timedelta(days=day),
+ "clean_embedding_cache_task": {
+ "task": "schedule.clean_embedding_cache_task.clean_embedding_cache_task",
+ "schedule": timedelta(days=day),
+ },
+ "clean_unused_datasets_task": {
+ "task": "schedule.clean_unused_datasets_task.clean_unused_datasets_task",
+ "schedule": timedelta(days=day),
},
- 'clean_unused_datasets_task': {
- 'task': 'schedule.clean_unused_datasets_task.clean_unused_datasets_task',
- 'schedule': timedelta(days=day),
- }
}
- celery_app.conf.update(
- beat_schedule=beat_schedule,
- imports=imports
- )
+ celery_app.conf.update(beat_schedule=beat_schedule, imports=imports)
return celery_app
diff --git a/api/extensions/ext_compress.py b/api/extensions/ext_compress.py
index 1dbaffcfb0dc27..38e67749fcc994 100644
--- a/api/extensions/ext_compress.py
+++ b/api/extensions/ext_compress.py
@@ -2,15 +2,14 @@
def init_app(app: Flask):
- if app.config.get('API_COMPRESSION_ENABLED'):
+ if app.config.get("API_COMPRESSION_ENABLED"):
from flask_compress import Compress
- app.config['COMPRESS_MIMETYPES'] = [
- 'application/json',
- 'image/svg+xml',
- 'text/html',
+ app.config["COMPRESS_MIMETYPES"] = [
+ "application/json",
+ "image/svg+xml",
+ "text/html",
]
compress = Compress()
compress.init_app(app)
-
diff --git a/api/extensions/ext_database.py b/api/extensions/ext_database.py
index c248e173a252c7..f6ffa536343afc 100644
--- a/api/extensions/ext_database.py
+++ b/api/extensions/ext_database.py
@@ -2,11 +2,11 @@
from sqlalchemy import MetaData
POSTGRES_INDEXES_NAMING_CONVENTION = {
- 'ix': '%(column_0_label)s_idx',
- 'uq': '%(table_name)s_%(column_0_name)s_key',
- 'ck': '%(table_name)s_%(constraint_name)s_check',
- 'fk': '%(table_name)s_%(column_0_name)s_fkey',
- 'pk': '%(table_name)s_pkey',
+ "ix": "%(column_0_label)s_idx",
+ "uq": "%(table_name)s_%(column_0_name)s_key",
+ "ck": "%(table_name)s_%(constraint_name)s_check",
+ "fk": "%(table_name)s_%(column_0_name)s_fkey",
+ "pk": "%(table_name)s_pkey",
}
metadata = MetaData(naming_convention=POSTGRES_INDEXES_NAMING_CONVENTION)
diff --git a/api/extensions/ext_mail.py b/api/extensions/ext_mail.py
index ec3a5cc112b871..b435294abc23ba 100644
--- a/api/extensions/ext_mail.py
+++ b/api/extensions/ext_mail.py
@@ -14,67 +14,69 @@ def is_inited(self) -> bool:
return self._client is not None
def init_app(self, app: Flask):
- if app.config.get('MAIL_TYPE'):
- if app.config.get('MAIL_DEFAULT_SEND_FROM'):
- self._default_send_from = app.config.get('MAIL_DEFAULT_SEND_FROM')
-
- if app.config.get('MAIL_TYPE') == 'resend':
- api_key = app.config.get('RESEND_API_KEY')
+ if app.config.get("MAIL_TYPE"):
+ if app.config.get("MAIL_DEFAULT_SEND_FROM"):
+ self._default_send_from = app.config.get("MAIL_DEFAULT_SEND_FROM")
+
+ if app.config.get("MAIL_TYPE") == "resend":
+ api_key = app.config.get("RESEND_API_KEY")
if not api_key:
- raise ValueError('RESEND_API_KEY is not set')
+ raise ValueError("RESEND_API_KEY is not set")
- api_url = app.config.get('RESEND_API_URL')
+ api_url = app.config.get("RESEND_API_URL")
if api_url:
resend.api_url = api_url
resend.api_key = api_key
self._client = resend.Emails
- elif app.config.get('MAIL_TYPE') == 'smtp':
+ elif app.config.get("MAIL_TYPE") == "smtp":
from libs.smtp import SMTPClient
- if not app.config.get('SMTP_SERVER') or not app.config.get('SMTP_PORT'):
- raise ValueError('SMTP_SERVER and SMTP_PORT are required for smtp mail type')
- if not app.config.get('SMTP_USE_TLS') and app.config.get('SMTP_OPPORTUNISTIC_TLS'):
- raise ValueError('SMTP_OPPORTUNISTIC_TLS is not supported without enabling SMTP_USE_TLS')
+
+ if not app.config.get("SMTP_SERVER") or not app.config.get("SMTP_PORT"):
+ raise ValueError("SMTP_SERVER and SMTP_PORT are required for smtp mail type")
+ if not app.config.get("SMTP_USE_TLS") and app.config.get("SMTP_OPPORTUNISTIC_TLS"):
+ raise ValueError("SMTP_OPPORTUNISTIC_TLS is not supported without enabling SMTP_USE_TLS")
self._client = SMTPClient(
- server=app.config.get('SMTP_SERVER'),
- port=app.config.get('SMTP_PORT'),
- username=app.config.get('SMTP_USERNAME'),
- password=app.config.get('SMTP_PASSWORD'),
- _from=app.config.get('MAIL_DEFAULT_SEND_FROM'),
- use_tls=app.config.get('SMTP_USE_TLS'),
- opportunistic_tls=app.config.get('SMTP_OPPORTUNISTIC_TLS')
+ server=app.config.get("SMTP_SERVER"),
+ port=app.config.get("SMTP_PORT"),
+ username=app.config.get("SMTP_USERNAME"),
+ password=app.config.get("SMTP_PASSWORD"),
+ _from=app.config.get("MAIL_DEFAULT_SEND_FROM"),
+ use_tls=app.config.get("SMTP_USE_TLS"),
+ opportunistic_tls=app.config.get("SMTP_OPPORTUNISTIC_TLS"),
)
else:
- raise ValueError('Unsupported mail type {}'.format(app.config.get('MAIL_TYPE')))
+ raise ValueError("Unsupported mail type {}".format(app.config.get("MAIL_TYPE")))
else:
- logging.warning('MAIL_TYPE is not set')
-
+ logging.warning("MAIL_TYPE is not set")
def send(self, to: str, subject: str, html: str, from_: Optional[str] = None):
if not self._client:
- raise ValueError('Mail client is not initialized')
+ raise ValueError("Mail client is not initialized")
if not from_ and self._default_send_from:
from_ = self._default_send_from
if not from_:
- raise ValueError('mail from is not set')
+ raise ValueError("mail from is not set")
if not to:
- raise ValueError('mail to is not set')
+ raise ValueError("mail to is not set")
if not subject:
- raise ValueError('mail subject is not set')
+ raise ValueError("mail subject is not set")
if not html:
- raise ValueError('mail html is not set')
-
- self._client.send({
- "from": from_,
- "to": to,
- "subject": subject,
- "html": html
- })
+ raise ValueError("mail html is not set")
+
+ self._client.send(
+ {
+ "from": from_,
+ "to": to,
+ "subject": subject,
+ "html": html,
+ }
+ )
def init_app(app: Flask):
diff --git a/api/extensions/ext_redis.py b/api/extensions/ext_redis.py
index 23d7768d4d0f5a..d5fb162fd8f2fb 100644
--- a/api/extensions/ext_redis.py
+++ b/api/extensions/ext_redis.py
@@ -6,18 +6,21 @@
def init_app(app):
connection_class = Connection
- if app.config.get('REDIS_USE_SSL'):
+ if app.config.get("REDIS_USE_SSL"):
connection_class = SSLConnection
- redis_client.connection_pool = redis.ConnectionPool(**{
- 'host': app.config.get('REDIS_HOST'),
- 'port': app.config.get('REDIS_PORT'),
- 'username': app.config.get('REDIS_USERNAME'),
- 'password': app.config.get('REDIS_PASSWORD'),
- 'db': app.config.get('REDIS_DB'),
- 'encoding': 'utf-8',
- 'encoding_errors': 'strict',
- 'decode_responses': False
- }, connection_class=connection_class)
+ redis_client.connection_pool = redis.ConnectionPool(
+ **{
+ "host": app.config.get("REDIS_HOST"),
+ "port": app.config.get("REDIS_PORT"),
+ "username": app.config.get("REDIS_USERNAME"),
+ "password": app.config.get("REDIS_PASSWORD"),
+ "db": app.config.get("REDIS_DB"),
+ "encoding": "utf-8",
+ "encoding_errors": "strict",
+ "decode_responses": False,
+ },
+ connection_class=connection_class,
+ )
- app.extensions['redis'] = redis_client
+ app.extensions["redis"] = redis_client
diff --git a/api/extensions/ext_sentry.py b/api/extensions/ext_sentry.py
index f05c10bc08926e..227c6635f0eb11 100644
--- a/api/extensions/ext_sentry.py
+++ b/api/extensions/ext_sentry.py
@@ -5,16 +5,13 @@
def init_app(app):
- if app.config.get('SENTRY_DSN'):
+ if app.config.get("SENTRY_DSN"):
sentry_sdk.init(
- dsn=app.config.get('SENTRY_DSN'),
- integrations=[
- FlaskIntegration(),
- CeleryIntegration()
- ],
+ dsn=app.config.get("SENTRY_DSN"),
+ integrations=[FlaskIntegration(), CeleryIntegration()],
ignore_errors=[HTTPException, ValueError],
- traces_sample_rate=app.config.get('SENTRY_TRACES_SAMPLE_RATE', 1.0),
- profiles_sample_rate=app.config.get('SENTRY_PROFILES_SAMPLE_RATE', 1.0),
- environment=app.config.get('DEPLOY_ENV'),
- release=f"dify-{app.config.get('CURRENT_VERSION')}-{app.config.get('COMMIT_SHA')}"
+ traces_sample_rate=app.config.get("SENTRY_TRACES_SAMPLE_RATE", 1.0),
+ profiles_sample_rate=app.config.get("SENTRY_PROFILES_SAMPLE_RATE", 1.0),
+ environment=app.config.get("DEPLOY_ENV"),
+ release=f"dify-{app.config.get('CURRENT_VERSION')}-{app.config.get('COMMIT_SHA')}",
)
diff --git a/api/extensions/ext_storage.py b/api/extensions/ext_storage.py
index 38db1c6ce103af..e6c4352577fc3f 100644
--- a/api/extensions/ext_storage.py
+++ b/api/extensions/ext_storage.py
@@ -17,31 +17,19 @@ def __init__(self):
self.storage_runner = None
def init_app(self, app: Flask):
- storage_type = app.config.get('STORAGE_TYPE')
- if storage_type == 's3':
- self.storage_runner = S3Storage(
- app=app
- )
- elif storage_type == 'azure-blob':
- self.storage_runner = AzureStorage(
- app=app
- )
- elif storage_type == 'aliyun-oss':
- self.storage_runner = AliyunStorage(
- app=app
- )
- elif storage_type == 'google-storage':
- self.storage_runner = GoogleStorage(
- app=app
- )
- elif storage_type == 'tencent-cos':
- self.storage_runner = TencentStorage(
- app=app
- )
- elif storage_type == 'oci-storage':
- self.storage_runner = OCIStorage(
- app=app
- )
+ storage_type = app.config.get("STORAGE_TYPE")
+ if storage_type == "s3":
+ self.storage_runner = S3Storage(app=app)
+ elif storage_type == "azure-blob":
+ self.storage_runner = AzureStorage(app=app)
+ elif storage_type == "aliyun-oss":
+ self.storage_runner = AliyunStorage(app=app)
+ elif storage_type == "google-storage":
+ self.storage_runner = GoogleStorage(app=app)
+ elif storage_type == "tencent-cos":
+ self.storage_runner = TencentStorage(app=app)
+ elif storage_type == "oci-storage":
+ self.storage_runner = OCIStorage(app=app)
else:
self.storage_runner = LocalStorage(app=app)
diff --git a/api/extensions/storage/aliyun_storage.py b/api/extensions/storage/aliyun_storage.py
index b81a8691f15457..b962cedc55178d 100644
--- a/api/extensions/storage/aliyun_storage.py
+++ b/api/extensions/storage/aliyun_storage.py
@@ -8,23 +8,22 @@
class AliyunStorage(BaseStorage):
- """Implementation for aliyun storage.
- """
+ """Implementation for aliyun storage."""
def __init__(self, app: Flask):
super().__init__(app)
app_config = self.app.config
- self.bucket_name = app_config.get('ALIYUN_OSS_BUCKET_NAME')
+ self.bucket_name = app_config.get("ALIYUN_OSS_BUCKET_NAME")
oss_auth_method = aliyun_s3.Auth
region = None
- if app_config.get('ALIYUN_OSS_AUTH_VERSION') == 'v4':
+ if app_config.get("ALIYUN_OSS_AUTH_VERSION") == "v4":
oss_auth_method = aliyun_s3.AuthV4
- region = app_config.get('ALIYUN_OSS_REGION')
- oss_auth = oss_auth_method(app_config.get('ALIYUN_OSS_ACCESS_KEY'), app_config.get('ALIYUN_OSS_SECRET_KEY'))
+ region = app_config.get("ALIYUN_OSS_REGION")
+ oss_auth = oss_auth_method(app_config.get("ALIYUN_OSS_ACCESS_KEY"), app_config.get("ALIYUN_OSS_SECRET_KEY"))
self.client = aliyun_s3.Bucket(
oss_auth,
- app_config.get('ALIYUN_OSS_ENDPOINT'),
+ app_config.get("ALIYUN_OSS_ENDPOINT"),
self.bucket_name,
connect_timeout=30,
region=region,
diff --git a/api/extensions/storage/azure_storage.py b/api/extensions/storage/azure_storage.py
index af3e7ef84911ff..ca8cbb9188b5c9 100644
--- a/api/extensions/storage/azure_storage.py
+++ b/api/extensions/storage/azure_storage.py
@@ -9,16 +9,15 @@
class AzureStorage(BaseStorage):
- """Implementation for azure storage.
- """
+ """Implementation for azure storage."""
def __init__(self, app: Flask):
super().__init__(app)
app_config = self.app.config
- self.bucket_name = app_config.get('AZURE_BLOB_CONTAINER_NAME')
- self.account_url = app_config.get('AZURE_BLOB_ACCOUNT_URL')
- self.account_name = app_config.get('AZURE_BLOB_ACCOUNT_NAME')
- self.account_key = app_config.get('AZURE_BLOB_ACCOUNT_KEY')
+ self.bucket_name = app_config.get("AZURE_BLOB_CONTAINER_NAME")
+ self.account_url = app_config.get("AZURE_BLOB_ACCOUNT_URL")
+ self.account_name = app_config.get("AZURE_BLOB_ACCOUNT_NAME")
+ self.account_key = app_config.get("AZURE_BLOB_ACCOUNT_KEY")
def save(self, filename, data):
client = self._sync_client()
@@ -39,6 +38,7 @@ def generate(filename: str = filename) -> Generator:
blob = client.get_blob_client(container=self.bucket_name, blob=filename)
blob_data = blob.download_blob()
yield from blob_data.chunks()
+
return generate(filename)
def download(self, filename, target_filepath):
@@ -62,17 +62,17 @@ def delete(self, filename):
blob_container.delete_blob(filename)
def _sync_client(self):
- cache_key = 'azure_blob_sas_token_{}_{}'.format(self.account_name, self.account_key)
+ cache_key = "azure_blob_sas_token_{}_{}".format(self.account_name, self.account_key)
cache_result = redis_client.get(cache_key)
if cache_result is not None:
- sas_token = cache_result.decode('utf-8')
+ sas_token = cache_result.decode("utf-8")
else:
sas_token = generate_account_sas(
account_name=self.account_name,
account_key=self.account_key,
resource_types=ResourceTypes(service=True, container=True, object=True),
permission=AccountSasPermissions(read=True, write=True, delete=True, list=True, add=True, create=True),
- expiry=datetime.now(timezone.utc).replace(tzinfo=None) + timedelta(hours=1)
+ expiry=datetime.now(timezone.utc).replace(tzinfo=None) + timedelta(hours=1),
)
redis_client.set(cache_key, sas_token, ex=3000)
return BlobServiceClient(account_url=self.account_url, credential=sas_token)
diff --git a/api/extensions/storage/base_storage.py b/api/extensions/storage/base_storage.py
index 13d9c3429044c8..c3fe9ec82a5b41 100644
--- a/api/extensions/storage/base_storage.py
+++ b/api/extensions/storage/base_storage.py
@@ -1,4 +1,5 @@
"""Abstract interface for file storage implementations."""
+
from abc import ABC, abstractmethod
from collections.abc import Generator
@@ -6,8 +7,8 @@
class BaseStorage(ABC):
- """Interface for file storage.
- """
+ """Interface for file storage."""
+
app = None
def __init__(self, app: Flask):
diff --git a/api/extensions/storage/google_storage.py b/api/extensions/storage/google_storage.py
index ef6cd69039787c..9ed1fcf0b4e118 100644
--- a/api/extensions/storage/google_storage.py
+++ b/api/extensions/storage/google_storage.py
@@ -11,16 +11,16 @@
class GoogleStorage(BaseStorage):
- """Implementation for google storage.
- """
+ """Implementation for google storage."""
+
def __init__(self, app: Flask):
super().__init__(app)
app_config = self.app.config
- self.bucket_name = app_config.get('GOOGLE_STORAGE_BUCKET_NAME')
- service_account_json_str = app_config.get('GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64')
+ self.bucket_name = app_config.get("GOOGLE_STORAGE_BUCKET_NAME")
+ service_account_json_str = app_config.get("GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64")
# if service_account_json_str is empty, use Application Default Credentials
if service_account_json_str:
- service_account_json = base64.b64decode(service_account_json_str).decode('utf-8')
+ service_account_json = base64.b64decode(service_account_json_str).decode("utf-8")
# convert str to object
service_account_obj = json.loads(service_account_json)
self.client = GoogleCloudStorage.Client.from_service_account_info(service_account_obj)
@@ -43,9 +43,10 @@ def load_stream(self, filename: str) -> Generator:
def generate(filename: str = filename) -> Generator:
bucket = self.client.get_bucket(self.bucket_name)
blob = bucket.get_blob(filename)
- with closing(blob.open(mode='rb')) as blob_stream:
+ with closing(blob.open(mode="rb")) as blob_stream:
while chunk := blob_stream.read(4096):
yield chunk
+
return generate()
def download(self, filename, target_filepath):
@@ -60,4 +61,4 @@ def exists(self, filename):
def delete(self, filename):
bucket = self.client.get_bucket(self.bucket_name)
- bucket.delete_blob(filename)
\ No newline at end of file
+ bucket.delete_blob(filename)
diff --git a/api/extensions/storage/local_storage.py b/api/extensions/storage/local_storage.py
index 389ef12f82bcd8..46ee4bf80f8e6d 100644
--- a/api/extensions/storage/local_storage.py
+++ b/api/extensions/storage/local_storage.py
@@ -8,21 +8,20 @@
class LocalStorage(BaseStorage):
- """Implementation for local storage.
- """
+ """Implementation for local storage."""
def __init__(self, app: Flask):
super().__init__(app)
- folder = self.app.config.get('STORAGE_LOCAL_PATH')
+ folder = self.app.config.get("STORAGE_LOCAL_PATH")
if not os.path.isabs(folder):
folder = os.path.join(app.root_path, folder)
self.folder = folder
def save(self, filename, data):
- if not self.folder or self.folder.endswith('/'):
+ if not self.folder or self.folder.endswith("/"):
filename = self.folder + filename
else:
- filename = self.folder + '/' + filename
+ filename = self.folder + "/" + filename
folder = os.path.dirname(filename)
os.makedirs(folder, exist_ok=True)
@@ -31,10 +30,10 @@ def save(self, filename, data):
f.write(data)
def load_once(self, filename: str) -> bytes:
- if not self.folder or self.folder.endswith('/'):
+ if not self.folder or self.folder.endswith("/"):
filename = self.folder + filename
else:
- filename = self.folder + '/' + filename
+ filename = self.folder + "/" + filename
if not os.path.exists(filename):
raise FileNotFoundError("File not found")
@@ -46,10 +45,10 @@ def load_once(self, filename: str) -> bytes:
def load_stream(self, filename: str) -> Generator:
def generate(filename: str = filename) -> Generator:
- if not self.folder or self.folder.endswith('/'):
+ if not self.folder or self.folder.endswith("/"):
filename = self.folder + filename
else:
- filename = self.folder + '/' + filename
+ filename = self.folder + "/" + filename
if not os.path.exists(filename):
raise FileNotFoundError("File not found")
@@ -61,10 +60,10 @@ def generate(filename: str = filename) -> Generator:
return generate()
def download(self, filename, target_filepath):
- if not self.folder or self.folder.endswith('/'):
+ if not self.folder or self.folder.endswith("/"):
filename = self.folder + filename
else:
- filename = self.folder + '/' + filename
+ filename = self.folder + "/" + filename
if not os.path.exists(filename):
raise FileNotFoundError("File not found")
@@ -72,17 +71,17 @@ def download(self, filename, target_filepath):
shutil.copyfile(filename, target_filepath)
def exists(self, filename):
- if not self.folder or self.folder.endswith('/'):
+ if not self.folder or self.folder.endswith("/"):
filename = self.folder + filename
else:
- filename = self.folder + '/' + filename
+ filename = self.folder + "/" + filename
return os.path.exists(filename)
def delete(self, filename):
- if not self.folder or self.folder.endswith('/'):
+ if not self.folder or self.folder.endswith("/"):
filename = self.folder + filename
else:
- filename = self.folder + '/' + filename
+ filename = self.folder + "/" + filename
if os.path.exists(filename):
os.remove(filename)
diff --git a/api/extensions/storage/oci_storage.py b/api/extensions/storage/oci_storage.py
index e78d870950339b..e32fa0a0ae78a9 100644
--- a/api/extensions/storage/oci_storage.py
+++ b/api/extensions/storage/oci_storage.py
@@ -12,14 +12,14 @@ class OCIStorage(BaseStorage):
def __init__(self, app: Flask):
super().__init__(app)
app_config = self.app.config
- self.bucket_name = app_config.get('OCI_BUCKET_NAME')
+ self.bucket_name = app_config.get("OCI_BUCKET_NAME")
self.client = boto3.client(
- 's3',
- aws_secret_access_key=app_config.get('OCI_SECRET_KEY'),
- aws_access_key_id=app_config.get('OCI_ACCESS_KEY'),
- endpoint_url=app_config.get('OCI_ENDPOINT'),
- region_name=app_config.get('OCI_REGION')
- )
+ "s3",
+ aws_secret_access_key=app_config.get("OCI_SECRET_KEY"),
+ aws_access_key_id=app_config.get("OCI_ACCESS_KEY"),
+ endpoint_url=app_config.get("OCI_ENDPOINT"),
+ region_name=app_config.get("OCI_REGION"),
+ )
def save(self, filename, data):
self.client.put_object(Bucket=self.bucket_name, Key=filename, Body=data)
@@ -27,9 +27,9 @@ def save(self, filename, data):
def load_once(self, filename: str) -> bytes:
try:
with closing(self.client) as client:
- data = client.get_object(Bucket=self.bucket_name, Key=filename)['Body'].read()
+ data = client.get_object(Bucket=self.bucket_name, Key=filename)["Body"].read()
except ClientError as ex:
- if ex.response['Error']['Code'] == 'NoSuchKey':
+ if ex.response["Error"]["Code"] == "NoSuchKey":
raise FileNotFoundError("File not found")
else:
raise
@@ -40,12 +40,13 @@ def generate(filename: str = filename) -> Generator:
try:
with closing(self.client) as client:
response = client.get_object(Bucket=self.bucket_name, Key=filename)
- yield from response['Body'].iter_chunks()
+ yield from response["Body"].iter_chunks()
except ClientError as ex:
- if ex.response['Error']['Code'] == 'NoSuchKey':
+ if ex.response["Error"]["Code"] == "NoSuchKey":
raise FileNotFoundError("File not found")
else:
raise
+
return generate()
def download(self, filename, target_filepath):
@@ -61,4 +62,4 @@ def exists(self, filename):
return False
def delete(self, filename):
- self.client.delete_object(Bucket=self.bucket_name, Key=filename)
\ No newline at end of file
+ self.client.delete_object(Bucket=self.bucket_name, Key=filename)
diff --git a/api/extensions/storage/s3_storage.py b/api/extensions/storage/s3_storage.py
index 787596fa791d4a..022ce5b14a7b88 100644
--- a/api/extensions/storage/s3_storage.py
+++ b/api/extensions/storage/s3_storage.py
@@ -10,24 +10,24 @@
class S3Storage(BaseStorage):
- """Implementation for s3 storage.
- """
+ """Implementation for s3 storage."""
+
def __init__(self, app: Flask):
super().__init__(app)
app_config = self.app.config
- self.bucket_name = app_config.get('S3_BUCKET_NAME')
- if app_config.get('S3_USE_AWS_MANAGED_IAM'):
+ self.bucket_name = app_config.get("S3_BUCKET_NAME")
+ if app_config.get("S3_USE_AWS_MANAGED_IAM"):
session = boto3.Session()
- self.client = session.client('s3')
+ self.client = session.client("s3")
else:
self.client = boto3.client(
- 's3',
- aws_secret_access_key=app_config.get('S3_SECRET_KEY'),
- aws_access_key_id=app_config.get('S3_ACCESS_KEY'),
- endpoint_url=app_config.get('S3_ENDPOINT'),
- region_name=app_config.get('S3_REGION'),
- config=Config(s3={'addressing_style': app_config.get('S3_ADDRESS_STYLE')})
- )
+ "s3",
+ aws_secret_access_key=app_config.get("S3_SECRET_KEY"),
+ aws_access_key_id=app_config.get("S3_ACCESS_KEY"),
+ endpoint_url=app_config.get("S3_ENDPOINT"),
+ region_name=app_config.get("S3_REGION"),
+ config=Config(s3={"addressing_style": app_config.get("S3_ADDRESS_STYLE")}),
+ )
def save(self, filename, data):
self.client.put_object(Bucket=self.bucket_name, Key=filename, Body=data)
@@ -35,9 +35,9 @@ def save(self, filename, data):
def load_once(self, filename: str) -> bytes:
try:
with closing(self.client) as client:
- data = client.get_object(Bucket=self.bucket_name, Key=filename)['Body'].read()
+ data = client.get_object(Bucket=self.bucket_name, Key=filename)["Body"].read()
except ClientError as ex:
- if ex.response['Error']['Code'] == 'NoSuchKey':
+ if ex.response["Error"]["Code"] == "NoSuchKey":
raise FileNotFoundError("File not found")
else:
raise
@@ -48,12 +48,13 @@ def generate(filename: str = filename) -> Generator:
try:
with closing(self.client) as client:
response = client.get_object(Bucket=self.bucket_name, Key=filename)
- yield from response['Body'].iter_chunks()
+ yield from response["Body"].iter_chunks()
except ClientError as ex:
- if ex.response['Error']['Code'] == 'NoSuchKey':
+ if ex.response["Error"]["Code"] == "NoSuchKey":
raise FileNotFoundError("File not found")
else:
raise
+
return generate()
def download(self, filename, target_filepath):
diff --git a/api/extensions/storage/tencent_storage.py b/api/extensions/storage/tencent_storage.py
index e2c1ca55e3c434..1d499cd3bcea1c 100644
--- a/api/extensions/storage/tencent_storage.py
+++ b/api/extensions/storage/tencent_storage.py
@@ -7,18 +7,17 @@
class TencentStorage(BaseStorage):
- """Implementation for tencent cos storage.
- """
+ """Implementation for tencent cos storage."""
def __init__(self, app: Flask):
super().__init__(app)
app_config = self.app.config
- self.bucket_name = app_config.get('TENCENT_COS_BUCKET_NAME')
+ self.bucket_name = app_config.get("TENCENT_COS_BUCKET_NAME")
config = CosConfig(
- Region=app_config.get('TENCENT_COS_REGION'),
- SecretId=app_config.get('TENCENT_COS_SECRET_ID'),
- SecretKey=app_config.get('TENCENT_COS_SECRET_KEY'),
- Scheme=app_config.get('TENCENT_COS_SCHEME'),
+ Region=app_config.get("TENCENT_COS_REGION"),
+ SecretId=app_config.get("TENCENT_COS_SECRET_ID"),
+ SecretKey=app_config.get("TENCENT_COS_SECRET_KEY"),
+ Scheme=app_config.get("TENCENT_COS_SCHEME"),
)
self.client = CosS3Client(config)
@@ -26,19 +25,19 @@ def save(self, filename, data):
self.client.put_object(Bucket=self.bucket_name, Body=data, Key=filename)
def load_once(self, filename: str) -> bytes:
- data = self.client.get_object(Bucket=self.bucket_name, Key=filename)['Body'].get_raw_stream().read()
+ data = self.client.get_object(Bucket=self.bucket_name, Key=filename)["Body"].get_raw_stream().read()
return data
def load_stream(self, filename: str) -> Generator:
def generate(filename: str = filename) -> Generator:
response = self.client.get_object(Bucket=self.bucket_name, Key=filename)
- yield from response['Body'].get_stream(chunk_size=4096)
+ yield from response["Body"].get_stream(chunk_size=4096)
return generate()
def download(self, filename, target_filepath):
response = self.client.get_object(Bucket=self.bucket_name, Key=filename)
- response['Body'].get_stream_to_file(target_filepath)
+ response["Body"].get_stream_to_file(target_filepath)
def exists(self, filename):
return self.client.object_exists(Bucket=self.bucket_name, Key=filename)
diff --git a/api/fields/annotation_fields.py b/api/fields/annotation_fields.py
index c77808447519fc..379dcc6d16fe56 100644
--- a/api/fields/annotation_fields.py
+++ b/api/fields/annotation_fields.py
@@ -5,7 +5,7 @@
annotation_fields = {
"id": fields.String,
"question": fields.String,
- "answer": fields.Raw(attribute='content'),
+ "answer": fields.Raw(attribute="content"),
"hit_count": fields.Integer,
"created_at": TimestampField,
# 'account': fields.Nested(simple_account_fields, allow_null=True)
@@ -21,8 +21,8 @@
"score": fields.Float,
"question": fields.String,
"created_at": TimestampField,
- "match": fields.String(attribute='annotation_question'),
- "response": fields.String(attribute='annotation_content')
+ "match": fields.String(attribute="annotation_question"),
+ "response": fields.String(attribute="annotation_content"),
}
annotation_hit_history_list_fields = {
diff --git a/api/fields/api_based_extension_fields.py b/api/fields/api_based_extension_fields.py
index 749e9900de181d..a85d4a34dbe7b1 100644
--- a/api/fields/api_based_extension_fields.py
+++ b/api/fields/api_based_extension_fields.py
@@ -8,16 +8,16 @@ def output(self, key, obj):
api_key = obj.api_key
# If the length of the api_key is less than 8 characters, show the first and last characters
if len(api_key) <= 8:
- return api_key[0] + '******' + api_key[-1]
+ return api_key[0] + "******" + api_key[-1]
# If the api_key is greater than 8 characters, show the first three and the last three characters
else:
- return api_key[:3] + '******' + api_key[-3:]
+ return api_key[:3] + "******" + api_key[-3:]
api_based_extension_fields = {
- 'id': fields.String,
- 'name': fields.String,
- 'api_endpoint': fields.String,
- 'api_key': HiddenAPIKey,
- 'created_at': TimestampField
+ "id": fields.String,
+ "name": fields.String,
+ "api_endpoint": fields.String,
+ "api_key": HiddenAPIKey,
+ "created_at": TimestampField,
}
diff --git a/api/fields/app_fields.py b/api/fields/app_fields.py
index 94d804a919869f..7036d58e4ab3a2 100644
--- a/api/fields/app_fields.py
+++ b/api/fields/app_fields.py
@@ -3,157 +3,153 @@
from libs.helper import TimestampField
app_detail_kernel_fields = {
- 'id': fields.String,
- 'name': fields.String,
- 'description': fields.String,
- 'mode': fields.String(attribute='mode_compatible_with_agent'),
- 'icon': fields.String,
- 'icon_background': fields.String,
+ "id": fields.String,
+ "name": fields.String,
+ "description": fields.String,
+ "mode": fields.String(attribute="mode_compatible_with_agent"),
+ "icon": fields.String,
+ "icon_background": fields.String,
}
related_app_list = {
- 'data': fields.List(fields.Nested(app_detail_kernel_fields)),
- 'total': fields.Integer,
+ "data": fields.List(fields.Nested(app_detail_kernel_fields)),
+ "total": fields.Integer,
}
model_config_fields = {
- 'opening_statement': fields.String,
- 'suggested_questions': fields.Raw(attribute='suggested_questions_list'),
- 'suggested_questions_after_answer': fields.Raw(attribute='suggested_questions_after_answer_dict'),
- 'speech_to_text': fields.Raw(attribute='speech_to_text_dict'),
- 'text_to_speech': fields.Raw(attribute='text_to_speech_dict'),
- 'retriever_resource': fields.Raw(attribute='retriever_resource_dict'),
- 'annotation_reply': fields.Raw(attribute='annotation_reply_dict'),
- 'more_like_this': fields.Raw(attribute='more_like_this_dict'),
- 'sensitive_word_avoidance': fields.Raw(attribute='sensitive_word_avoidance_dict'),
- 'external_data_tools': fields.Raw(attribute='external_data_tools_list'),
- 'model': fields.Raw(attribute='model_dict'),
- 'user_input_form': fields.Raw(attribute='user_input_form_list'),
- 'dataset_query_variable': fields.String,
- 'pre_prompt': fields.String,
- 'agent_mode': fields.Raw(attribute='agent_mode_dict'),
- 'prompt_type': fields.String,
- 'chat_prompt_config': fields.Raw(attribute='chat_prompt_config_dict'),
- 'completion_prompt_config': fields.Raw(attribute='completion_prompt_config_dict'),
- 'dataset_configs': fields.Raw(attribute='dataset_configs_dict'),
- 'file_upload': fields.Raw(attribute='file_upload_dict'),
- 'created_at': TimestampField
+ "opening_statement": fields.String,
+ "suggested_questions": fields.Raw(attribute="suggested_questions_list"),
+ "suggested_questions_after_answer": fields.Raw(attribute="suggested_questions_after_answer_dict"),
+ "speech_to_text": fields.Raw(attribute="speech_to_text_dict"),
+ "text_to_speech": fields.Raw(attribute="text_to_speech_dict"),
+ "retriever_resource": fields.Raw(attribute="retriever_resource_dict"),
+ "annotation_reply": fields.Raw(attribute="annotation_reply_dict"),
+ "more_like_this": fields.Raw(attribute="more_like_this_dict"),
+ "sensitive_word_avoidance": fields.Raw(attribute="sensitive_word_avoidance_dict"),
+ "external_data_tools": fields.Raw(attribute="external_data_tools_list"),
+ "model": fields.Raw(attribute="model_dict"),
+ "user_input_form": fields.Raw(attribute="user_input_form_list"),
+ "dataset_query_variable": fields.String,
+ "pre_prompt": fields.String,
+ "agent_mode": fields.Raw(attribute="agent_mode_dict"),
+ "prompt_type": fields.String,
+ "chat_prompt_config": fields.Raw(attribute="chat_prompt_config_dict"),
+ "completion_prompt_config": fields.Raw(attribute="completion_prompt_config_dict"),
+ "dataset_configs": fields.Raw(attribute="dataset_configs_dict"),
+ "file_upload": fields.Raw(attribute="file_upload_dict"),
+ "created_at": TimestampField,
}
app_detail_fields = {
- 'id': fields.String,
- 'name': fields.String,
- 'description': fields.String,
- 'mode': fields.String(attribute='mode_compatible_with_agent'),
- 'icon': fields.String,
- 'icon_background': fields.String,
- 'enable_site': fields.Boolean,
- 'enable_api': fields.Boolean,
- 'model_config': fields.Nested(model_config_fields, attribute='app_model_config', allow_null=True),
- 'tracing': fields.Raw,
- 'created_at': TimestampField
+ "id": fields.String,
+ "name": fields.String,
+ "description": fields.String,
+ "mode": fields.String(attribute="mode_compatible_with_agent"),
+ "icon": fields.String,
+ "icon_background": fields.String,
+ "enable_site": fields.Boolean,
+ "enable_api": fields.Boolean,
+ "model_config": fields.Nested(model_config_fields, attribute="app_model_config", allow_null=True),
+ "tracing": fields.Raw,
+ "created_at": TimestampField,
}
prompt_config_fields = {
- 'prompt_template': fields.String,
+ "prompt_template": fields.String,
}
model_config_partial_fields = {
- 'model': fields.Raw(attribute='model_dict'),
- 'pre_prompt': fields.String,
+ "model": fields.Raw(attribute="model_dict"),
+ "pre_prompt": fields.String,
}
-tag_fields = {
- 'id': fields.String,
- 'name': fields.String,
- 'type': fields.String
-}
+tag_fields = {"id": fields.String, "name": fields.String, "type": fields.String}
app_partial_fields = {
- 'id': fields.String,
- 'name': fields.String,
- 'max_active_requests': fields.Raw(),
- 'description': fields.String(attribute='desc_or_prompt'),
- 'mode': fields.String(attribute='mode_compatible_with_agent'),
- 'icon': fields.String,
- 'icon_background': fields.String,
- 'model_config': fields.Nested(model_config_partial_fields, attribute='app_model_config', allow_null=True),
- 'created_at': TimestampField,
- 'tags': fields.List(fields.Nested(tag_fields))
+ "id": fields.String,
+ "name": fields.String,
+ "max_active_requests": fields.Raw(),
+ "description": fields.String(attribute="desc_or_prompt"),
+ "mode": fields.String(attribute="mode_compatible_with_agent"),
+ "icon": fields.String,
+ "icon_background": fields.String,
+ "model_config": fields.Nested(model_config_partial_fields, attribute="app_model_config", allow_null=True),
+ "created_at": TimestampField,
+ "tags": fields.List(fields.Nested(tag_fields)),
}
app_pagination_fields = {
- 'page': fields.Integer,
- 'limit': fields.Integer(attribute='per_page'),
- 'total': fields.Integer,
- 'has_more': fields.Boolean(attribute='has_next'),
- 'data': fields.List(fields.Nested(app_partial_fields), attribute='items')
+ "page": fields.Integer,
+ "limit": fields.Integer(attribute="per_page"),
+ "total": fields.Integer,
+ "has_more": fields.Boolean(attribute="has_next"),
+ "data": fields.List(fields.Nested(app_partial_fields), attribute="items"),
}
template_fields = {
- 'name': fields.String,
- 'icon': fields.String,
- 'icon_background': fields.String,
- 'description': fields.String,
- 'mode': fields.String,
- 'model_config': fields.Nested(model_config_fields),
+ "name": fields.String,
+ "icon": fields.String,
+ "icon_background": fields.String,
+ "description": fields.String,
+ "mode": fields.String,
+ "model_config": fields.Nested(model_config_fields),
}
template_list_fields = {
- 'data': fields.List(fields.Nested(template_fields)),
+ "data": fields.List(fields.Nested(template_fields)),
}
site_fields = {
- 'access_token': fields.String(attribute='code'),
- 'code': fields.String,
- 'title': fields.String,
- 'icon': fields.String,
- 'icon_background': fields.String,
- 'description': fields.String,
- 'default_language': fields.String,
- 'chat_color_theme': fields.String,
- 'chat_color_theme_inverted': fields.Boolean,
- 'customize_domain': fields.String,
- 'copyright': fields.String,
- 'privacy_policy': fields.String,
- 'custom_disclaimer': fields.String,
- 'customize_token_strategy': fields.String,
- 'prompt_public': fields.Boolean,
- 'app_base_url': fields.String,
- 'show_workflow_steps': fields.Boolean,
+ "access_token": fields.String(attribute="code"),
+ "code": fields.String,
+ "title": fields.String,
+ "icon": fields.String,
+ "icon_background": fields.String,
+ "description": fields.String,
+ "default_language": fields.String,
+ "chat_color_theme": fields.String,
+ "chat_color_theme_inverted": fields.Boolean,
+ "customize_domain": fields.String,
+ "copyright": fields.String,
+ "privacy_policy": fields.String,
+ "custom_disclaimer": fields.String,
+ "customize_token_strategy": fields.String,
+ "prompt_public": fields.Boolean,
+ "app_base_url": fields.String,
+ "show_workflow_steps": fields.Boolean,
}
app_detail_fields_with_site = {
- 'id': fields.String,
- 'name': fields.String,
- 'description': fields.String,
- 'mode': fields.String(attribute='mode_compatible_with_agent'),
- 'icon': fields.String,
- 'icon_background': fields.String,
- 'enable_site': fields.Boolean,
- 'enable_api': fields.Boolean,
- 'model_config': fields.Nested(model_config_fields, attribute='app_model_config', allow_null=True),
- 'site': fields.Nested(site_fields),
- 'api_base_url': fields.String,
- 'created_at': TimestampField,
- 'deleted_tools': fields.List(fields.String),
+ "id": fields.String,
+ "name": fields.String,
+ "description": fields.String,
+ "mode": fields.String(attribute="mode_compatible_with_agent"),
+ "icon": fields.String,
+ "icon_background": fields.String,
+ "enable_site": fields.Boolean,
+ "enable_api": fields.Boolean,
+ "model_config": fields.Nested(model_config_fields, attribute="app_model_config", allow_null=True),
+ "site": fields.Nested(site_fields),
+ "api_base_url": fields.String,
+ "created_at": TimestampField,
+ "deleted_tools": fields.List(fields.String),
}
app_site_fields = {
- 'app_id': fields.String,
- 'access_token': fields.String(attribute='code'),
- 'code': fields.String,
- 'title': fields.String,
- 'icon': fields.String,
- 'icon_background': fields.String,
- 'description': fields.String,
- 'default_language': fields.String,
- 'customize_domain': fields.String,
- 'copyright': fields.String,
- 'privacy_policy': fields.String,
- 'custom_disclaimer': fields.String,
- 'customize_token_strategy': fields.String,
- 'prompt_public': fields.Boolean,
- 'show_workflow_steps': fields.Boolean,
+ "app_id": fields.String,
+ "access_token": fields.String(attribute="code"),
+ "code": fields.String,
+ "title": fields.String,
+ "icon": fields.String,
+ "icon_background": fields.String,
+ "description": fields.String,
+ "default_language": fields.String,
+ "customize_domain": fields.String,
+ "copyright": fields.String,
+ "privacy_policy": fields.String,
+ "custom_disclaimer": fields.String,
+ "customize_token_strategy": fields.String,
+ "prompt_public": fields.Boolean,
+ "show_workflow_steps": fields.Boolean,
}
diff --git a/api/fields/conversation_fields.py b/api/fields/conversation_fields.py
index 79ceb026852792..1b15fe38800b3e 100644
--- a/api/fields/conversation_fields.py
+++ b/api/fields/conversation_fields.py
@@ -6,205 +6,202 @@
class MessageTextField(fields.Raw):
def format(self, value):
- return value[0]['text'] if value else ''
+ return value[0]["text"] if value else ""
feedback_fields = {
- 'rating': fields.String,
- 'content': fields.String,
- 'from_source': fields.String,
- 'from_end_user_id': fields.String,
- 'from_account': fields.Nested(simple_account_fields, allow_null=True),
+ "rating": fields.String,
+ "content": fields.String,
+ "from_source": fields.String,
+ "from_end_user_id": fields.String,
+ "from_account": fields.Nested(simple_account_fields, allow_null=True),
}
annotation_fields = {
- 'id': fields.String,
- 'question': fields.String,
- 'content': fields.String,
- 'account': fields.Nested(simple_account_fields, allow_null=True),
- 'created_at': TimestampField
+ "id": fields.String,
+ "question": fields.String,
+ "content": fields.String,
+ "account": fields.Nested(simple_account_fields, allow_null=True),
+ "created_at": TimestampField,
}
annotation_hit_history_fields = {
- 'annotation_id': fields.String(attribute='id'),
- 'annotation_create_account': fields.Nested(simple_account_fields, allow_null=True),
- 'created_at': TimestampField
+ "annotation_id": fields.String(attribute="id"),
+ "annotation_create_account": fields.Nested(simple_account_fields, allow_null=True),
+ "created_at": TimestampField,
}
message_file_fields = {
- 'id': fields.String,
- 'type': fields.String,
- 'url': fields.String,
- 'belongs_to': fields.String(default='user'),
+ "id": fields.String,
+ "type": fields.String,
+ "url": fields.String,
+ "belongs_to": fields.String(default="user"),
}
agent_thought_fields = {
- 'id': fields.String,
- 'chain_id': fields.String,
- 'message_id': fields.String,
- 'position': fields.Integer,
- 'thought': fields.String,
- 'tool': fields.String,
- 'tool_labels': fields.Raw,
- 'tool_input': fields.String,
- 'created_at': TimestampField,
- 'observation': fields.String,
- 'files': fields.List(fields.String),
+ "id": fields.String,
+ "chain_id": fields.String,
+ "message_id": fields.String,
+ "position": fields.Integer,
+ "thought": fields.String,
+ "tool": fields.String,
+ "tool_labels": fields.Raw,
+ "tool_input": fields.String,
+ "created_at": TimestampField,
+ "observation": fields.String,
+ "files": fields.List(fields.String),
}
message_detail_fields = {
- 'id': fields.String,
- 'conversation_id': fields.String,
- 'inputs': fields.Raw,
- 'query': fields.String,
- 'message': fields.Raw,
- 'message_tokens': fields.Integer,
- 'answer': fields.String(attribute='re_sign_file_url_answer'),
- 'answer_tokens': fields.Integer,
- 'provider_response_latency': fields.Float,
- 'from_source': fields.String,
- 'from_end_user_id': fields.String,
- 'from_account_id': fields.String,
- 'feedbacks': fields.List(fields.Nested(feedback_fields)),
- 'workflow_run_id': fields.String,
- 'annotation': fields.Nested(annotation_fields, allow_null=True),
- 'annotation_hit_history': fields.Nested(annotation_hit_history_fields, allow_null=True),
- 'created_at': TimestampField,
- 'agent_thoughts': fields.List(fields.Nested(agent_thought_fields)),
- 'message_files': fields.List(fields.Nested(message_file_fields), attribute='files'),
- 'metadata': fields.Raw(attribute='message_metadata_dict'),
- 'status': fields.String,
- 'error': fields.String,
-}
-
-feedback_stat_fields = {
- 'like': fields.Integer,
- 'dislike': fields.Integer
-}
+ "id": fields.String,
+ "conversation_id": fields.String,
+ "inputs": fields.Raw,
+ "query": fields.String,
+ "message": fields.Raw,
+ "message_tokens": fields.Integer,
+ "answer": fields.String(attribute="re_sign_file_url_answer"),
+ "answer_tokens": fields.Integer,
+ "provider_response_latency": fields.Float,
+ "from_source": fields.String,
+ "from_end_user_id": fields.String,
+ "from_account_id": fields.String,
+ "feedbacks": fields.List(fields.Nested(feedback_fields)),
+ "workflow_run_id": fields.String,
+ "annotation": fields.Nested(annotation_fields, allow_null=True),
+ "annotation_hit_history": fields.Nested(annotation_hit_history_fields, allow_null=True),
+ "created_at": TimestampField,
+ "agent_thoughts": fields.List(fields.Nested(agent_thought_fields)),
+ "message_files": fields.List(fields.Nested(message_file_fields), attribute="files"),
+ "metadata": fields.Raw(attribute="message_metadata_dict"),
+ "status": fields.String,
+ "error": fields.String,
+}
+
+feedback_stat_fields = {"like": fields.Integer, "dislike": fields.Integer}
model_config_fields = {
- 'opening_statement': fields.String,
- 'suggested_questions': fields.Raw,
- 'model': fields.Raw,
- 'user_input_form': fields.Raw,
- 'pre_prompt': fields.String,
- 'agent_mode': fields.Raw,
+ "opening_statement": fields.String,
+ "suggested_questions": fields.Raw,
+ "model": fields.Raw,
+ "user_input_form": fields.Raw,
+ "pre_prompt": fields.String,
+ "agent_mode": fields.Raw,
}
simple_configs_fields = {
- 'prompt_template': fields.String,
+ "prompt_template": fields.String,
}
simple_model_config_fields = {
- 'model': fields.Raw(attribute='model_dict'),
- 'pre_prompt': fields.String,
+ "model": fields.Raw(attribute="model_dict"),
+ "pre_prompt": fields.String,
}
simple_message_detail_fields = {
- 'inputs': fields.Raw,
- 'query': fields.String,
- 'message': MessageTextField,
- 'answer': fields.String,
+ "inputs": fields.Raw,
+ "query": fields.String,
+ "message": MessageTextField,
+ "answer": fields.String,
}
conversation_fields = {
- 'id': fields.String,
- 'status': fields.String,
- 'from_source': fields.String,
- 'from_end_user_id': fields.String,
- 'from_end_user_session_id': fields.String(),
- 'from_account_id': fields.String,
- 'read_at': TimestampField,
- 'created_at': TimestampField,
- 'annotation': fields.Nested(annotation_fields, allow_null=True),
- 'model_config': fields.Nested(simple_model_config_fields),
- 'user_feedback_stats': fields.Nested(feedback_stat_fields),
- 'admin_feedback_stats': fields.Nested(feedback_stat_fields),
- 'message': fields.Nested(simple_message_detail_fields, attribute='first_message')
+ "id": fields.String,
+ "status": fields.String,
+ "from_source": fields.String,
+ "from_end_user_id": fields.String,
+ "from_end_user_session_id": fields.String(),
+ "from_account_id": fields.String,
+ "read_at": TimestampField,
+ "created_at": TimestampField,
+ "annotation": fields.Nested(annotation_fields, allow_null=True),
+ "model_config": fields.Nested(simple_model_config_fields),
+ "user_feedback_stats": fields.Nested(feedback_stat_fields),
+ "admin_feedback_stats": fields.Nested(feedback_stat_fields),
+ "message": fields.Nested(simple_message_detail_fields, attribute="first_message"),
}
conversation_pagination_fields = {
- 'page': fields.Integer,
- 'limit': fields.Integer(attribute='per_page'),
- 'total': fields.Integer,
- 'has_more': fields.Boolean(attribute='has_next'),
- 'data': fields.List(fields.Nested(conversation_fields), attribute='items')
+ "page": fields.Integer,
+ "limit": fields.Integer(attribute="per_page"),
+ "total": fields.Integer,
+ "has_more": fields.Boolean(attribute="has_next"),
+ "data": fields.List(fields.Nested(conversation_fields), attribute="items"),
}
conversation_message_detail_fields = {
- 'id': fields.String,
- 'status': fields.String,
- 'from_source': fields.String,
- 'from_end_user_id': fields.String,
- 'from_account_id': fields.String,
- 'created_at': TimestampField,
- 'model_config': fields.Nested(model_config_fields),
- 'message': fields.Nested(message_detail_fields, attribute='first_message'),
+ "id": fields.String,
+ "status": fields.String,
+ "from_source": fields.String,
+ "from_end_user_id": fields.String,
+ "from_account_id": fields.String,
+ "created_at": TimestampField,
+ "model_config": fields.Nested(model_config_fields),
+ "message": fields.Nested(message_detail_fields, attribute="first_message"),
}
conversation_with_summary_fields = {
- 'id': fields.String,
- 'status': fields.String,
- 'from_source': fields.String,
- 'from_end_user_id': fields.String,
- 'from_end_user_session_id': fields.String,
- 'from_account_id': fields.String,
- 'name': fields.String,
- 'summary': fields.String(attribute='summary_or_query'),
- 'read_at': TimestampField,
- 'created_at': TimestampField,
- 'annotated': fields.Boolean,
- 'model_config': fields.Nested(simple_model_config_fields),
- 'message_count': fields.Integer,
- 'user_feedback_stats': fields.Nested(feedback_stat_fields),
- 'admin_feedback_stats': fields.Nested(feedback_stat_fields)
+ "id": fields.String,
+ "status": fields.String,
+ "from_source": fields.String,
+ "from_end_user_id": fields.String,
+ "from_end_user_session_id": fields.String,
+ "from_account_id": fields.String,
+ "name": fields.String,
+ "summary": fields.String(attribute="summary_or_query"),
+ "read_at": TimestampField,
+ "created_at": TimestampField,
+ "annotated": fields.Boolean,
+ "model_config": fields.Nested(simple_model_config_fields),
+ "message_count": fields.Integer,
+ "user_feedback_stats": fields.Nested(feedback_stat_fields),
+ "admin_feedback_stats": fields.Nested(feedback_stat_fields),
}
conversation_with_summary_pagination_fields = {
- 'page': fields.Integer,
- 'limit': fields.Integer(attribute='per_page'),
- 'total': fields.Integer,
- 'has_more': fields.Boolean(attribute='has_next'),
- 'data': fields.List(fields.Nested(conversation_with_summary_fields), attribute='items')
+ "page": fields.Integer,
+ "limit": fields.Integer(attribute="per_page"),
+ "total": fields.Integer,
+ "has_more": fields.Boolean(attribute="has_next"),
+ "data": fields.List(fields.Nested(conversation_with_summary_fields), attribute="items"),
}
conversation_detail_fields = {
- 'id': fields.String,
- 'status': fields.String,
- 'from_source': fields.String,
- 'from_end_user_id': fields.String,
- 'from_account_id': fields.String,
- 'created_at': TimestampField,
- 'annotated': fields.Boolean,
- 'introduction': fields.String,
- 'model_config': fields.Nested(model_config_fields),
- 'message_count': fields.Integer,
- 'user_feedback_stats': fields.Nested(feedback_stat_fields),
- 'admin_feedback_stats': fields.Nested(feedback_stat_fields)
+ "id": fields.String,
+ "status": fields.String,
+ "from_source": fields.String,
+ "from_end_user_id": fields.String,
+ "from_account_id": fields.String,
+ "created_at": TimestampField,
+ "annotated": fields.Boolean,
+ "introduction": fields.String,
+ "model_config": fields.Nested(model_config_fields),
+ "message_count": fields.Integer,
+ "user_feedback_stats": fields.Nested(feedback_stat_fields),
+ "admin_feedback_stats": fields.Nested(feedback_stat_fields),
}
simple_conversation_fields = {
- 'id': fields.String,
- 'name': fields.String,
- 'inputs': fields.Raw,
- 'status': fields.String,
- 'introduction': fields.String,
- 'created_at': TimestampField
+ "id": fields.String,
+ "name": fields.String,
+ "inputs": fields.Raw,
+ "status": fields.String,
+ "introduction": fields.String,
+ "created_at": TimestampField,
}
conversation_infinite_scroll_pagination_fields = {
- 'limit': fields.Integer,
- 'has_more': fields.Boolean,
- 'data': fields.List(fields.Nested(simple_conversation_fields))
+ "limit": fields.Integer,
+ "has_more": fields.Boolean,
+ "data": fields.List(fields.Nested(simple_conversation_fields)),
}
conversation_with_model_config_fields = {
**simple_conversation_fields,
- 'model_config': fields.Raw,
+ "model_config": fields.Raw,
}
conversation_with_model_config_infinite_scroll_pagination_fields = {
- 'limit': fields.Integer,
- 'has_more': fields.Boolean,
- 'data': fields.List(fields.Nested(conversation_with_model_config_fields))
+ "limit": fields.Integer,
+ "has_more": fields.Boolean,
+ "data": fields.List(fields.Nested(conversation_with_model_config_fields)),
}
diff --git a/api/fields/conversation_variable_fields.py b/api/fields/conversation_variable_fields.py
new file mode 100644
index 00000000000000..983e50e73ceb9f
--- /dev/null
+++ b/api/fields/conversation_variable_fields.py
@@ -0,0 +1,21 @@
+from flask_restful import fields
+
+from libs.helper import TimestampField
+
+conversation_variable_fields = {
+ "id": fields.String,
+ "name": fields.String,
+ "value_type": fields.String(attribute="value_type.value"),
+ "value": fields.String,
+ "description": fields.String,
+ "created_at": TimestampField,
+ "updated_at": TimestampField,
+}
+
+paginated_conversation_variable_fields = {
+ "page": fields.Integer,
+ "limit": fields.Integer,
+ "total": fields.Integer,
+ "has_more": fields.Boolean,
+ "data": fields.List(fields.Nested(conversation_variable_fields), attribute="data"),
+}
diff --git a/api/fields/data_source_fields.py b/api/fields/data_source_fields.py
index 6f3c920c85b60f..071071376fe6c8 100644
--- a/api/fields/data_source_fields.py
+++ b/api/fields/data_source_fields.py
@@ -2,64 +2,56 @@
from libs.helper import TimestampField
-integrate_icon_fields = {
- 'type': fields.String,
- 'url': fields.String,
- 'emoji': fields.String
-}
+integrate_icon_fields = {"type": fields.String, "url": fields.String, "emoji": fields.String}
integrate_page_fields = {
- 'page_name': fields.String,
- 'page_id': fields.String,
- 'page_icon': fields.Nested(integrate_icon_fields, allow_null=True),
- 'is_bound': fields.Boolean,
- 'parent_id': fields.String,
- 'type': fields.String
+ "page_name": fields.String,
+ "page_id": fields.String,
+ "page_icon": fields.Nested(integrate_icon_fields, allow_null=True),
+ "is_bound": fields.Boolean,
+ "parent_id": fields.String,
+ "type": fields.String,
}
integrate_workspace_fields = {
- 'workspace_name': fields.String,
- 'workspace_id': fields.String,
- 'workspace_icon': fields.String,
- 'pages': fields.List(fields.Nested(integrate_page_fields))
+ "workspace_name": fields.String,
+ "workspace_id": fields.String,
+ "workspace_icon": fields.String,
+ "pages": fields.List(fields.Nested(integrate_page_fields)),
}
integrate_notion_info_list_fields = {
- 'notion_info': fields.List(fields.Nested(integrate_workspace_fields)),
+ "notion_info": fields.List(fields.Nested(integrate_workspace_fields)),
}
-integrate_icon_fields = {
- 'type': fields.String,
- 'url': fields.String,
- 'emoji': fields.String
-}
+integrate_icon_fields = {"type": fields.String, "url": fields.String, "emoji": fields.String}
integrate_page_fields = {
- 'page_name': fields.String,
- 'page_id': fields.String,
- 'page_icon': fields.Nested(integrate_icon_fields, allow_null=True),
- 'parent_id': fields.String,
- 'type': fields.String
+ "page_name": fields.String,
+ "page_id": fields.String,
+ "page_icon": fields.Nested(integrate_icon_fields, allow_null=True),
+ "parent_id": fields.String,
+ "type": fields.String,
}
integrate_workspace_fields = {
- 'workspace_name': fields.String,
- 'workspace_id': fields.String,
- 'workspace_icon': fields.String,
- 'pages': fields.List(fields.Nested(integrate_page_fields)),
- 'total': fields.Integer
+ "workspace_name": fields.String,
+ "workspace_id": fields.String,
+ "workspace_icon": fields.String,
+ "pages": fields.List(fields.Nested(integrate_page_fields)),
+ "total": fields.Integer,
}
integrate_fields = {
- 'id': fields.String,
- 'provider': fields.String,
- 'created_at': TimestampField,
- 'is_bound': fields.Boolean,
- 'disabled': fields.Boolean,
- 'link': fields.String,
- 'source_info': fields.Nested(integrate_workspace_fields)
+ "id": fields.String,
+ "provider": fields.String,
+ "created_at": TimestampField,
+ "is_bound": fields.Boolean,
+ "disabled": fields.Boolean,
+ "link": fields.String,
+ "source_info": fields.Nested(integrate_workspace_fields),
}
integrate_list_fields = {
- 'data': fields.List(fields.Nested(integrate_fields)),
-}
\ No newline at end of file
+ "data": fields.List(fields.Nested(integrate_fields)),
+}
diff --git a/api/fields/dataset_fields.py b/api/fields/dataset_fields.py
index a9f79b5c678e7c..9cf8da7acdc984 100644
--- a/api/fields/dataset_fields.py
+++ b/api/fields/dataset_fields.py
@@ -3,73 +3,64 @@
from libs.helper import TimestampField
dataset_fields = {
- 'id': fields.String,
- 'name': fields.String,
- 'description': fields.String,
- 'permission': fields.String,
- 'data_source_type': fields.String,
- 'indexing_technique': fields.String,
- 'created_by': fields.String,
- 'created_at': TimestampField,
+ "id": fields.String,
+ "name": fields.String,
+ "description": fields.String,
+ "permission": fields.String,
+ "data_source_type": fields.String,
+ "indexing_technique": fields.String,
+ "created_by": fields.String,
+ "created_at": TimestampField,
}
-reranking_model_fields = {
- 'reranking_provider_name': fields.String,
- 'reranking_model_name': fields.String
-}
+reranking_model_fields = {"reranking_provider_name": fields.String, "reranking_model_name": fields.String}
-keyword_setting_fields = {
- 'keyword_weight': fields.Float
-}
+keyword_setting_fields = {"keyword_weight": fields.Float}
vector_setting_fields = {
- 'vector_weight': fields.Float,
- 'embedding_model_name': fields.String,
- 'embedding_provider_name': fields.String,
+ "vector_weight": fields.Float,
+ "embedding_model_name": fields.String,
+ "embedding_provider_name": fields.String,
}
weighted_score_fields = {
- 'keyword_setting': fields.Nested(keyword_setting_fields),
- 'vector_setting': fields.Nested(vector_setting_fields),
+ "keyword_setting": fields.Nested(keyword_setting_fields),
+ "vector_setting": fields.Nested(vector_setting_fields),
}
dataset_retrieval_model_fields = {
- 'search_method': fields.String,
- 'reranking_enable': fields.Boolean,
- 'reranking_mode': fields.String,
- 'reranking_model': fields.Nested(reranking_model_fields),
- 'weights': fields.Nested(weighted_score_fields, allow_null=True),
- 'top_k': fields.Integer,
- 'score_threshold_enabled': fields.Boolean,
- 'score_threshold': fields.Float
+ "search_method": fields.String,
+ "reranking_enable": fields.Boolean,
+ "reranking_mode": fields.String,
+ "reranking_model": fields.Nested(reranking_model_fields),
+ "weights": fields.Nested(weighted_score_fields, allow_null=True),
+ "top_k": fields.Integer,
+ "score_threshold_enabled": fields.Boolean,
+ "score_threshold": fields.Float,
}
-tag_fields = {
- 'id': fields.String,
- 'name': fields.String,
- 'type': fields.String
-}
+tag_fields = {"id": fields.String, "name": fields.String, "type": fields.String}
dataset_detail_fields = {
- 'id': fields.String,
- 'name': fields.String,
- 'description': fields.String,
- 'provider': fields.String,
- 'permission': fields.String,
- 'data_source_type': fields.String,
- 'indexing_technique': fields.String,
- 'app_count': fields.Integer,
- 'document_count': fields.Integer,
- 'word_count': fields.Integer,
- 'created_by': fields.String,
- 'created_at': TimestampField,
- 'updated_by': fields.String,
- 'updated_at': TimestampField,
- 'embedding_model': fields.String,
- 'embedding_model_provider': fields.String,
- 'embedding_available': fields.Boolean,
- 'retrieval_model_dict': fields.Nested(dataset_retrieval_model_fields),
- 'tags': fields.List(fields.Nested(tag_fields))
+ "id": fields.String,
+ "name": fields.String,
+ "description": fields.String,
+ "provider": fields.String,
+ "permission": fields.String,
+ "data_source_type": fields.String,
+ "indexing_technique": fields.String,
+ "app_count": fields.Integer,
+ "document_count": fields.Integer,
+ "word_count": fields.Integer,
+ "created_by": fields.String,
+ "created_at": TimestampField,
+ "updated_by": fields.String,
+ "updated_at": TimestampField,
+ "embedding_model": fields.String,
+ "embedding_model_provider": fields.String,
+ "embedding_available": fields.Boolean,
+ "retrieval_model_dict": fields.Nested(dataset_retrieval_model_fields),
+ "tags": fields.List(fields.Nested(tag_fields)),
}
dataset_query_detail_fields = {
@@ -79,7 +70,5 @@
"source_app_id": fields.String,
"created_by_role": fields.String,
"created_by": fields.String,
- "created_at": TimestampField
+ "created_at": TimestampField,
}
-
-
diff --git a/api/fields/document_fields.py b/api/fields/document_fields.py
index e8215255b35d5b..a83ec7bc97adee 100644
--- a/api/fields/document_fields.py
+++ b/api/fields/document_fields.py
@@ -4,75 +4,73 @@
from libs.helper import TimestampField
document_fields = {
- 'id': fields.String,
- 'position': fields.Integer,
- 'data_source_type': fields.String,
- 'data_source_info': fields.Raw(attribute='data_source_info_dict'),
- 'data_source_detail_dict': fields.Raw(attribute='data_source_detail_dict'),
- 'dataset_process_rule_id': fields.String,
- 'name': fields.String,
- 'created_from': fields.String,
- 'created_by': fields.String,
- 'created_at': TimestampField,
- 'tokens': fields.Integer,
- 'indexing_status': fields.String,
- 'error': fields.String,
- 'enabled': fields.Boolean,
- 'disabled_at': TimestampField,
- 'disabled_by': fields.String,
- 'archived': fields.Boolean,
- 'display_status': fields.String,
- 'word_count': fields.Integer,
- 'hit_count': fields.Integer,
- 'doc_form': fields.String,
+ "id": fields.String,
+ "position": fields.Integer,
+ "data_source_type": fields.String,
+ "data_source_info": fields.Raw(attribute="data_source_info_dict"),
+ "data_source_detail_dict": fields.Raw(attribute="data_source_detail_dict"),
+ "dataset_process_rule_id": fields.String,
+ "name": fields.String,
+ "created_from": fields.String,
+ "created_by": fields.String,
+ "created_at": TimestampField,
+ "tokens": fields.Integer,
+ "indexing_status": fields.String,
+ "error": fields.String,
+ "enabled": fields.Boolean,
+ "disabled_at": TimestampField,
+ "disabled_by": fields.String,
+ "archived": fields.Boolean,
+ "display_status": fields.String,
+ "word_count": fields.Integer,
+ "hit_count": fields.Integer,
+ "doc_form": fields.String,
}
document_with_segments_fields = {
- 'id': fields.String,
- 'position': fields.Integer,
- 'data_source_type': fields.String,
- 'data_source_info': fields.Raw(attribute='data_source_info_dict'),
- 'data_source_detail_dict': fields.Raw(attribute='data_source_detail_dict'),
- 'dataset_process_rule_id': fields.String,
- 'name': fields.String,
- 'created_from': fields.String,
- 'created_by': fields.String,
- 'created_at': TimestampField,
- 'tokens': fields.Integer,
- 'indexing_status': fields.String,
- 'error': fields.String,
- 'enabled': fields.Boolean,
- 'disabled_at': TimestampField,
- 'disabled_by': fields.String,
- 'archived': fields.Boolean,
- 'display_status': fields.String,
- 'word_count': fields.Integer,
- 'hit_count': fields.Integer,
- 'completed_segments': fields.Integer,
- 'total_segments': fields.Integer
+ "id": fields.String,
+ "position": fields.Integer,
+ "data_source_type": fields.String,
+ "data_source_info": fields.Raw(attribute="data_source_info_dict"),
+ "data_source_detail_dict": fields.Raw(attribute="data_source_detail_dict"),
+ "dataset_process_rule_id": fields.String,
+ "name": fields.String,
+ "created_from": fields.String,
+ "created_by": fields.String,
+ "created_at": TimestampField,
+ "tokens": fields.Integer,
+ "indexing_status": fields.String,
+ "error": fields.String,
+ "enabled": fields.Boolean,
+ "disabled_at": TimestampField,
+ "disabled_by": fields.String,
+ "archived": fields.Boolean,
+ "display_status": fields.String,
+ "word_count": fields.Integer,
+ "hit_count": fields.Integer,
+ "completed_segments": fields.Integer,
+ "total_segments": fields.Integer,
}
dataset_and_document_fields = {
- 'dataset': fields.Nested(dataset_fields),
- 'documents': fields.List(fields.Nested(document_fields)),
- 'batch': fields.String
+ "dataset": fields.Nested(dataset_fields),
+ "documents": fields.List(fields.Nested(document_fields)),
+ "batch": fields.String,
}
document_status_fields = {
- 'id': fields.String,
- 'indexing_status': fields.String,
- 'processing_started_at': TimestampField,
- 'parsing_completed_at': TimestampField,
- 'cleaning_completed_at': TimestampField,
- 'splitting_completed_at': TimestampField,
- 'completed_at': TimestampField,
- 'paused_at': TimestampField,
- 'error': fields.String,
- 'stopped_at': TimestampField,
- 'completed_segments': fields.Integer,
- 'total_segments': fields.Integer,
+ "id": fields.String,
+ "indexing_status": fields.String,
+ "processing_started_at": TimestampField,
+ "parsing_completed_at": TimestampField,
+ "cleaning_completed_at": TimestampField,
+ "splitting_completed_at": TimestampField,
+ "completed_at": TimestampField,
+ "paused_at": TimestampField,
+ "error": fields.String,
+ "stopped_at": TimestampField,
+ "completed_segments": fields.Integer,
+ "total_segments": fields.Integer,
}
-document_status_fields_list = {
- 'data': fields.List(fields.Nested(document_status_fields))
-}
\ No newline at end of file
+document_status_fields_list = {"data": fields.List(fields.Nested(document_status_fields))}
diff --git a/api/fields/end_user_fields.py b/api/fields/end_user_fields.py
index ee630c12c2e9aa..99e529f9d1c076 100644
--- a/api/fields/end_user_fields.py
+++ b/api/fields/end_user_fields.py
@@ -1,8 +1,8 @@
from flask_restful import fields
simple_end_user_fields = {
- 'id': fields.String,
- 'type': fields.String,
- 'is_anonymous': fields.Boolean,
- 'session_id': fields.String,
+ "id": fields.String,
+ "type": fields.String,
+ "is_anonymous": fields.Boolean,
+ "session_id": fields.String,
}
diff --git a/api/fields/file_fields.py b/api/fields/file_fields.py
index 2ef379dabc0d08..e5a03ce77ed5f0 100644
--- a/api/fields/file_fields.py
+++ b/api/fields/file_fields.py
@@ -3,17 +3,17 @@
from libs.helper import TimestampField
upload_config_fields = {
- 'file_size_limit': fields.Integer,
- 'batch_count_limit': fields.Integer,
- 'image_file_size_limit': fields.Integer,
+ "file_size_limit": fields.Integer,
+ "batch_count_limit": fields.Integer,
+ "image_file_size_limit": fields.Integer,
}
file_fields = {
- 'id': fields.String,
- 'name': fields.String,
- 'size': fields.Integer,
- 'extension': fields.String,
- 'mime_type': fields.String,
- 'created_by': fields.String,
- 'created_at': TimestampField,
-}
\ No newline at end of file
+ "id": fields.String,
+ "name": fields.String,
+ "size": fields.Integer,
+ "extension": fields.String,
+ "mime_type": fields.String,
+ "created_by": fields.String,
+ "created_at": TimestampField,
+}
diff --git a/api/fields/hit_testing_fields.py b/api/fields/hit_testing_fields.py
index 541e56a378dae4..f36e80f8d493d5 100644
--- a/api/fields/hit_testing_fields.py
+++ b/api/fields/hit_testing_fields.py
@@ -3,39 +3,39 @@
from libs.helper import TimestampField
document_fields = {
- 'id': fields.String,
- 'data_source_type': fields.String,
- 'name': fields.String,
- 'doc_type': fields.String,
+ "id": fields.String,
+ "data_source_type": fields.String,
+ "name": fields.String,
+ "doc_type": fields.String,
}
segment_fields = {
- 'id': fields.String,
- 'position': fields.Integer,
- 'document_id': fields.String,
- 'content': fields.String,
- 'answer': fields.String,
- 'word_count': fields.Integer,
- 'tokens': fields.Integer,
- 'keywords': fields.List(fields.String),
- 'index_node_id': fields.String,
- 'index_node_hash': fields.String,
- 'hit_count': fields.Integer,
- 'enabled': fields.Boolean,
- 'disabled_at': TimestampField,
- 'disabled_by': fields.String,
- 'status': fields.String,
- 'created_by': fields.String,
- 'created_at': TimestampField,
- 'indexing_at': TimestampField,
- 'completed_at': TimestampField,
- 'error': fields.String,
- 'stopped_at': TimestampField,
- 'document': fields.Nested(document_fields),
+ "id": fields.String,
+ "position": fields.Integer,
+ "document_id": fields.String,
+ "content": fields.String,
+ "answer": fields.String,
+ "word_count": fields.Integer,
+ "tokens": fields.Integer,
+ "keywords": fields.List(fields.String),
+ "index_node_id": fields.String,
+ "index_node_hash": fields.String,
+ "hit_count": fields.Integer,
+ "enabled": fields.Boolean,
+ "disabled_at": TimestampField,
+ "disabled_by": fields.String,
+ "status": fields.String,
+ "created_by": fields.String,
+ "created_at": TimestampField,
+ "indexing_at": TimestampField,
+ "completed_at": TimestampField,
+ "error": fields.String,
+ "stopped_at": TimestampField,
+ "document": fields.Nested(document_fields),
}
hit_testing_record_fields = {
- 'segment': fields.Nested(segment_fields),
- 'score': fields.Float,
- 'tsne_position': fields.Raw
-}
\ No newline at end of file
+ "segment": fields.Nested(segment_fields),
+ "score": fields.Float,
+ "tsne_position": fields.Raw,
+}
diff --git a/api/fields/installed_app_fields.py b/api/fields/installed_app_fields.py
index 35cc5a64755eca..b87cc653240a71 100644
--- a/api/fields/installed_app_fields.py
+++ b/api/fields/installed_app_fields.py
@@ -3,23 +3,21 @@
from libs.helper import TimestampField
app_fields = {
- 'id': fields.String,
- 'name': fields.String,
- 'mode': fields.String,
- 'icon': fields.String,
- 'icon_background': fields.String
+ "id": fields.String,
+ "name": fields.String,
+ "mode": fields.String,
+ "icon": fields.String,
+ "icon_background": fields.String,
}
installed_app_fields = {
- 'id': fields.String,
- 'app': fields.Nested(app_fields),
- 'app_owner_tenant_id': fields.String,
- 'is_pinned': fields.Boolean,
- 'last_used_at': TimestampField,
- 'editable': fields.Boolean,
- 'uninstallable': fields.Boolean
+ "id": fields.String,
+ "app": fields.Nested(app_fields),
+ "app_owner_tenant_id": fields.String,
+ "is_pinned": fields.Boolean,
+ "last_used_at": TimestampField,
+ "editable": fields.Boolean,
+ "uninstallable": fields.Boolean,
}
-installed_app_list_fields = {
- 'installed_apps': fields.List(fields.Nested(installed_app_fields))
-}
\ No newline at end of file
+installed_app_list_fields = {"installed_apps": fields.List(fields.Nested(installed_app_fields))}
diff --git a/api/fields/member_fields.py b/api/fields/member_fields.py
index d061b59c347022..1cf8e408d13d32 100644
--- a/api/fields/member_fields.py
+++ b/api/fields/member_fields.py
@@ -2,38 +2,32 @@
from libs.helper import TimestampField
-simple_account_fields = {
- 'id': fields.String,
- 'name': fields.String,
- 'email': fields.String
-}
+simple_account_fields = {"id": fields.String, "name": fields.String, "email": fields.String}
account_fields = {
- 'id': fields.String,
- 'name': fields.String,
- 'avatar': fields.String,
- 'email': fields.String,
- 'is_password_set': fields.Boolean,
- 'interface_language': fields.String,
- 'interface_theme': fields.String,
- 'timezone': fields.String,
- 'last_login_at': TimestampField,
- 'last_login_ip': fields.String,
- 'created_at': TimestampField
+ "id": fields.String,
+ "name": fields.String,
+ "avatar": fields.String,
+ "email": fields.String,
+ "is_password_set": fields.Boolean,
+ "interface_language": fields.String,
+ "interface_theme": fields.String,
+ "timezone": fields.String,
+ "last_login_at": TimestampField,
+ "last_login_ip": fields.String,
+ "created_at": TimestampField,
}
account_with_role_fields = {
- 'id': fields.String,
- 'name': fields.String,
- 'avatar': fields.String,
- 'email': fields.String,
- 'last_login_at': TimestampField,
- 'last_active_at': TimestampField,
- 'created_at': TimestampField,
- 'role': fields.String,
- 'status': fields.String,
+ "id": fields.String,
+ "name": fields.String,
+ "avatar": fields.String,
+ "email": fields.String,
+ "last_login_at": TimestampField,
+ "last_active_at": TimestampField,
+ "created_at": TimestampField,
+ "role": fields.String,
+ "status": fields.String,
}
-account_with_role_list_fields = {
- 'accounts': fields.List(fields.Nested(account_with_role_fields))
-}
+account_with_role_list_fields = {"accounts": fields.List(fields.Nested(account_with_role_fields))}
diff --git a/api/fields/message_fields.py b/api/fields/message_fields.py
index 31168435892427..3d2df87afb9b19 100644
--- a/api/fields/message_fields.py
+++ b/api/fields/message_fields.py
@@ -3,83 +3,79 @@
from fields.conversation_fields import message_file_fields
from libs.helper import TimestampField
-feedback_fields = {
- 'rating': fields.String
-}
+feedback_fields = {"rating": fields.String}
retriever_resource_fields = {
- 'id': fields.String,
- 'message_id': fields.String,
- 'position': fields.Integer,
- 'dataset_id': fields.String,
- 'dataset_name': fields.String,
- 'document_id': fields.String,
- 'document_name': fields.String,
- 'data_source_type': fields.String,
- 'segment_id': fields.String,
- 'score': fields.Float,
- 'hit_count': fields.Integer,
- 'word_count': fields.Integer,
- 'segment_position': fields.Integer,
- 'index_node_hash': fields.String,
- 'content': fields.String,
- 'created_at': TimestampField
+ "id": fields.String,
+ "message_id": fields.String,
+ "position": fields.Integer,
+ "dataset_id": fields.String,
+ "dataset_name": fields.String,
+ "document_id": fields.String,
+ "document_name": fields.String,
+ "data_source_type": fields.String,
+ "segment_id": fields.String,
+ "score": fields.Float,
+ "hit_count": fields.Integer,
+ "word_count": fields.Integer,
+ "segment_position": fields.Integer,
+ "index_node_hash": fields.String,
+ "content": fields.String,
+ "created_at": TimestampField,
}
-feedback_fields = {
- 'rating': fields.String
-}
+feedback_fields = {"rating": fields.String}
agent_thought_fields = {
- 'id': fields.String,
- 'chain_id': fields.String,
- 'message_id': fields.String,
- 'position': fields.Integer,
- 'thought': fields.String,
- 'tool': fields.String,
- 'tool_labels': fields.Raw,
- 'tool_input': fields.String,
- 'created_at': TimestampField,
- 'observation': fields.String,
- 'files': fields.List(fields.String)
+ "id": fields.String,
+ "chain_id": fields.String,
+ "message_id": fields.String,
+ "position": fields.Integer,
+ "thought": fields.String,
+ "tool": fields.String,
+ "tool_labels": fields.Raw,
+ "tool_input": fields.String,
+ "created_at": TimestampField,
+ "observation": fields.String,
+ "files": fields.List(fields.String),
}
retriever_resource_fields = {
- 'id': fields.String,
- 'message_id': fields.String,
- 'position': fields.Integer,
- 'dataset_id': fields.String,
- 'dataset_name': fields.String,
- 'document_id': fields.String,
- 'document_name': fields.String,
- 'data_source_type': fields.String,
- 'segment_id': fields.String,
- 'score': fields.Float,
- 'hit_count': fields.Integer,
- 'word_count': fields.Integer,
- 'segment_position': fields.Integer,
- 'index_node_hash': fields.String,
- 'content': fields.String,
- 'created_at': TimestampField
+ "id": fields.String,
+ "message_id": fields.String,
+ "position": fields.Integer,
+ "dataset_id": fields.String,
+ "dataset_name": fields.String,
+ "document_id": fields.String,
+ "document_name": fields.String,
+ "data_source_type": fields.String,
+ "segment_id": fields.String,
+ "score": fields.Float,
+ "hit_count": fields.Integer,
+ "word_count": fields.Integer,
+ "segment_position": fields.Integer,
+ "index_node_hash": fields.String,
+ "content": fields.String,
+ "created_at": TimestampField,
}
message_fields = {
- 'id': fields.String,
- 'conversation_id': fields.String,
- 'inputs': fields.Raw,
- 'query': fields.String,
- 'answer': fields.String(attribute='re_sign_file_url_answer'),
- 'feedback': fields.Nested(feedback_fields, attribute='user_feedback', allow_null=True),
- 'retriever_resources': fields.List(fields.Nested(retriever_resource_fields)),
- 'created_at': TimestampField,
- 'agent_thoughts': fields.List(fields.Nested(agent_thought_fields)),
- 'message_files': fields.List(fields.Nested(message_file_fields), attribute='files'),
- 'status': fields.String,
- 'error': fields.String,
+ "id": fields.String,
+ "conversation_id": fields.String,
+ "inputs": fields.Raw,
+ "query": fields.String,
+ "answer": fields.String(attribute="re_sign_file_url_answer"),
+ "feedback": fields.Nested(feedback_fields, attribute="user_feedback", allow_null=True),
+ "retriever_resources": fields.List(fields.Nested(retriever_resource_fields)),
+ "created_at": TimestampField,
+ "agent_thoughts": fields.List(fields.Nested(agent_thought_fields)),
+ "message_files": fields.List(fields.Nested(message_file_fields), attribute="files"),
+ "status": fields.String,
+ "error": fields.String,
}
message_infinite_scroll_pagination_fields = {
- 'limit': fields.Integer,
- 'has_more': fields.Boolean,
- 'data': fields.List(fields.Nested(message_fields))
+ "limit": fields.Integer,
+ "has_more": fields.Boolean,
+ "data": fields.List(fields.Nested(message_fields)),
}
diff --git a/api/fields/segment_fields.py b/api/fields/segment_fields.py
index e41d1a53dd0234..2dd4cb45be409b 100644
--- a/api/fields/segment_fields.py
+++ b/api/fields/segment_fields.py
@@ -3,31 +3,31 @@
from libs.helper import TimestampField
segment_fields = {
- 'id': fields.String,
- 'position': fields.Integer,
- 'document_id': fields.String,
- 'content': fields.String,
- 'answer': fields.String,
- 'word_count': fields.Integer,
- 'tokens': fields.Integer,
- 'keywords': fields.List(fields.String),
- 'index_node_id': fields.String,
- 'index_node_hash': fields.String,
- 'hit_count': fields.Integer,
- 'enabled': fields.Boolean,
- 'disabled_at': TimestampField,
- 'disabled_by': fields.String,
- 'status': fields.String,
- 'created_by': fields.String,
- 'created_at': TimestampField,
- 'indexing_at': TimestampField,
- 'completed_at': TimestampField,
- 'error': fields.String,
- 'stopped_at': TimestampField
+ "id": fields.String,
+ "position": fields.Integer,
+ "document_id": fields.String,
+ "content": fields.String,
+ "answer": fields.String,
+ "word_count": fields.Integer,
+ "tokens": fields.Integer,
+ "keywords": fields.List(fields.String),
+ "index_node_id": fields.String,
+ "index_node_hash": fields.String,
+ "hit_count": fields.Integer,
+ "enabled": fields.Boolean,
+ "disabled_at": TimestampField,
+ "disabled_by": fields.String,
+ "status": fields.String,
+ "created_by": fields.String,
+ "created_at": TimestampField,
+ "indexing_at": TimestampField,
+ "completed_at": TimestampField,
+ "error": fields.String,
+ "stopped_at": TimestampField,
}
segment_list_response = {
- 'data': fields.List(fields.Nested(segment_fields)),
- 'has_more': fields.Boolean,
- 'limit': fields.Integer
+ "data": fields.List(fields.Nested(segment_fields)),
+ "has_more": fields.Boolean,
+ "limit": fields.Integer,
}
diff --git a/api/fields/tag_fields.py b/api/fields/tag_fields.py
index f7e030b738e537..9af4fc57dd061c 100644
--- a/api/fields/tag_fields.py
+++ b/api/fields/tag_fields.py
@@ -1,8 +1,3 @@
from flask_restful import fields
-tag_fields = {
- 'id': fields.String,
- 'name': fields.String,
- 'type': fields.String,
- 'binding_count': fields.String
-}
\ No newline at end of file
+tag_fields = {"id": fields.String, "name": fields.String, "type": fields.String, "binding_count": fields.String}
diff --git a/api/fields/workflow_app_log_fields.py b/api/fields/workflow_app_log_fields.py
index e230c159fba59a..a53b54624915c2 100644
--- a/api/fields/workflow_app_log_fields.py
+++ b/api/fields/workflow_app_log_fields.py
@@ -7,18 +7,18 @@
workflow_app_log_partial_fields = {
"id": fields.String,
- "workflow_run": fields.Nested(workflow_run_for_log_fields, attribute='workflow_run', allow_null=True),
+ "workflow_run": fields.Nested(workflow_run_for_log_fields, attribute="workflow_run", allow_null=True),
"created_from": fields.String,
"created_by_role": fields.String,
- "created_by_account": fields.Nested(simple_account_fields, attribute='created_by_account', allow_null=True),
- "created_by_end_user": fields.Nested(simple_end_user_fields, attribute='created_by_end_user', allow_null=True),
- "created_at": TimestampField
+ "created_by_account": fields.Nested(simple_account_fields, attribute="created_by_account", allow_null=True),
+ "created_by_end_user": fields.Nested(simple_end_user_fields, attribute="created_by_end_user", allow_null=True),
+ "created_at": TimestampField,
}
workflow_app_log_pagination_fields = {
- 'page': fields.Integer,
- 'limit': fields.Integer(attribute='per_page'),
- 'total': fields.Integer,
- 'has_more': fields.Boolean(attribute='has_next'),
- 'data': fields.List(fields.Nested(workflow_app_log_partial_fields), attribute='items')
+ "page": fields.Integer,
+ "limit": fields.Integer(attribute="per_page"),
+ "total": fields.Integer,
+ "has_more": fields.Boolean(attribute="has_next"),
+ "data": fields.List(fields.Nested(workflow_app_log_partial_fields), attribute="items"),
}
diff --git a/api/fields/workflow_fields.py b/api/fields/workflow_fields.py
index ff33a97ff2a9ab..240b8f2eb03e79 100644
--- a/api/fields/workflow_fields.py
+++ b/api/fields/workflow_fields.py
@@ -13,41 +13,43 @@ def format(self, value):
# Mask secret variables values in environment_variables
if isinstance(value, SecretVariable):
return {
- 'id': value.id,
- 'name': value.name,
- 'value': encrypter.obfuscated_token(value.value),
- 'value_type': value.value_type.value,
+ "id": value.id,
+ "name": value.name,
+ "value": encrypter.obfuscated_token(value.value),
+ "value_type": value.value_type.value,
}
if isinstance(value, Variable):
return {
- 'id': value.id,
- 'name': value.name,
- 'value': value.value,
- 'value_type': value.value_type.value,
+ "id": value.id,
+ "name": value.name,
+ "value": value.value,
+ "value_type": value.value_type.value,
}
if isinstance(value, dict):
- value_type = value.get('value_type')
+ value_type = value.get("value_type")
if value_type not in ENVIRONMENT_VARIABLE_SUPPORTED_TYPES:
- raise ValueError(f'Unsupported environment variable value type: {value_type}')
+ raise ValueError(f"Unsupported environment variable value type: {value_type}")
return value
-environment_variable_fields = {
- 'id': fields.String,
- 'name': fields.String,
- 'value': fields.Raw,
- 'value_type': fields.String(attribute='value_type.value'),
+conversation_variable_fields = {
+ "id": fields.String,
+ "name": fields.String,
+ "value_type": fields.String(attribute="value_type.value"),
+ "value": fields.Raw,
+ "description": fields.String,
}
workflow_fields = {
- 'id': fields.String,
- 'graph': fields.Raw(attribute='graph_dict'),
- 'features': fields.Raw(attribute='features_dict'),
- 'hash': fields.String(attribute='unique_hash'),
- 'created_by': fields.Nested(simple_account_fields, attribute='created_by_account'),
- 'created_at': TimestampField,
- 'updated_by': fields.Nested(simple_account_fields, attribute='updated_by_account', allow_null=True),
- 'updated_at': TimestampField,
- 'tool_published': fields.Boolean,
- 'environment_variables': fields.List(EnvironmentVariableField()),
+ "id": fields.String,
+ "graph": fields.Raw(attribute="graph_dict"),
+ "features": fields.Raw(attribute="features_dict"),
+ "hash": fields.String(attribute="unique_hash"),
+ "created_by": fields.Nested(simple_account_fields, attribute="created_by_account"),
+ "created_at": TimestampField,
+ "updated_by": fields.Nested(simple_account_fields, attribute="updated_by_account", allow_null=True),
+ "updated_at": TimestampField,
+ "tool_published": fields.Boolean,
+ "environment_variables": fields.List(EnvironmentVariableField()),
+ "conversation_variables": fields.List(fields.Nested(conversation_variable_fields)),
}
diff --git a/api/fields/workflow_run_fields.py b/api/fields/workflow_run_fields.py
index 3e798473cd0481..1413adf7196879 100644
--- a/api/fields/workflow_run_fields.py
+++ b/api/fields/workflow_run_fields.py
@@ -13,7 +13,7 @@
"total_tokens": fields.Integer,
"total_steps": fields.Integer,
"created_at": TimestampField,
- "finished_at": TimestampField
+ "finished_at": TimestampField,
}
workflow_run_for_list_fields = {
@@ -24,9 +24,9 @@
"elapsed_time": fields.Float,
"total_tokens": fields.Integer,
"total_steps": fields.Integer,
- "created_by_account": fields.Nested(simple_account_fields, attribute='created_by_account', allow_null=True),
+ "created_by_account": fields.Nested(simple_account_fields, attribute="created_by_account", allow_null=True),
"created_at": TimestampField,
- "finished_at": TimestampField
+ "finished_at": TimestampField,
}
advanced_chat_workflow_run_for_list_fields = {
@@ -39,40 +39,40 @@
"elapsed_time": fields.Float,
"total_tokens": fields.Integer,
"total_steps": fields.Integer,
- "created_by_account": fields.Nested(simple_account_fields, attribute='created_by_account', allow_null=True),
+ "created_by_account": fields.Nested(simple_account_fields, attribute="created_by_account", allow_null=True),
"created_at": TimestampField,
- "finished_at": TimestampField
+ "finished_at": TimestampField,
}
advanced_chat_workflow_run_pagination_fields = {
- 'limit': fields.Integer(attribute='limit'),
- 'has_more': fields.Boolean(attribute='has_more'),
- 'data': fields.List(fields.Nested(advanced_chat_workflow_run_for_list_fields), attribute='data')
+ "limit": fields.Integer(attribute="limit"),
+ "has_more": fields.Boolean(attribute="has_more"),
+ "data": fields.List(fields.Nested(advanced_chat_workflow_run_for_list_fields), attribute="data"),
}
workflow_run_pagination_fields = {
- 'limit': fields.Integer(attribute='limit'),
- 'has_more': fields.Boolean(attribute='has_more'),
- 'data': fields.List(fields.Nested(workflow_run_for_list_fields), attribute='data')
+ "limit": fields.Integer(attribute="limit"),
+ "has_more": fields.Boolean(attribute="has_more"),
+ "data": fields.List(fields.Nested(workflow_run_for_list_fields), attribute="data"),
}
workflow_run_detail_fields = {
"id": fields.String,
"sequence_number": fields.Integer,
"version": fields.String,
- "graph": fields.Raw(attribute='graph_dict'),
- "inputs": fields.Raw(attribute='inputs_dict'),
+ "graph": fields.Raw(attribute="graph_dict"),
+ "inputs": fields.Raw(attribute="inputs_dict"),
"status": fields.String,
- "outputs": fields.Raw(attribute='outputs_dict'),
+ "outputs": fields.Raw(attribute="outputs_dict"),
"error": fields.String,
"elapsed_time": fields.Float,
"total_tokens": fields.Integer,
"total_steps": fields.Integer,
"created_by_role": fields.String,
- "created_by_account": fields.Nested(simple_account_fields, attribute='created_by_account', allow_null=True),
- "created_by_end_user": fields.Nested(simple_end_user_fields, attribute='created_by_end_user', allow_null=True),
+ "created_by_account": fields.Nested(simple_account_fields, attribute="created_by_account", allow_null=True),
+ "created_by_end_user": fields.Nested(simple_end_user_fields, attribute="created_by_end_user", allow_null=True),
"created_at": TimestampField,
- "finished_at": TimestampField
+ "finished_at": TimestampField,
}
workflow_run_node_execution_fields = {
@@ -82,21 +82,21 @@
"node_id": fields.String,
"node_type": fields.String,
"title": fields.String,
- "inputs": fields.Raw(attribute='inputs_dict'),
- "process_data": fields.Raw(attribute='process_data_dict'),
- "outputs": fields.Raw(attribute='outputs_dict'),
+ "inputs": fields.Raw(attribute="inputs_dict"),
+ "process_data": fields.Raw(attribute="process_data_dict"),
+ "outputs": fields.Raw(attribute="outputs_dict"),
"status": fields.String,
"error": fields.String,
"elapsed_time": fields.Float,
- "execution_metadata": fields.Raw(attribute='execution_metadata_dict'),
+ "execution_metadata": fields.Raw(attribute="execution_metadata_dict"),
"extras": fields.Raw,
"created_at": TimestampField,
"created_by_role": fields.String,
- "created_by_account": fields.Nested(simple_account_fields, attribute='created_by_account', allow_null=True),
- "created_by_end_user": fields.Nested(simple_end_user_fields, attribute='created_by_end_user', allow_null=True),
- "finished_at": TimestampField
+ "created_by_account": fields.Nested(simple_account_fields, attribute="created_by_account", allow_null=True),
+ "created_by_end_user": fields.Nested(simple_end_user_fields, attribute="created_by_end_user", allow_null=True),
+ "finished_at": TimestampField,
}
workflow_run_node_execution_list_fields = {
- 'data': fields.List(fields.Nested(workflow_run_node_execution_fields)),
+ "data": fields.List(fields.Nested(workflow_run_node_execution_fields)),
}
diff --git a/api/libs/bearer_data_source.py b/api/libs/bearer_data_source.py
index 04de1fb6daefbd..c1aee7b819e411 100644
--- a/api/libs/bearer_data_source.py
+++ b/api/libs/bearer_data_source.py
@@ -2,10 +2,10 @@
from abc import abstractmethod
import requests
-from api.models.source import DataSourceBearerBinding
from flask_login import current_user
from extensions.ext_database import db
+from models.source import DataSourceBearerBinding
class BearerDataSource:
diff --git a/api/libs/oauth_data_source.py b/api/libs/oauth_data_source.py
index a5c7814a543bdc..358858ceb1ec4d 100644
--- a/api/libs/oauth_data_source.py
+++ b/api/libs/oauth_data_source.py
@@ -154,11 +154,11 @@ def get_authorized_pages(self, access_token: str):
for page_result in page_results:
page_id = page_result['id']
page_name = 'Untitled'
- for key in ['Name', 'title', 'Title', 'Page']:
- if key in page_result['properties']:
- if len(page_result['properties'][key].get('title', [])) > 0:
- page_name = page_result['properties'][key]['title'][0]['plain_text']
- break
+ for key in page_result['properties']:
+ if 'title' in page_result['properties'][key] and page_result['properties'][key]['title']:
+ title_list = page_result['properties'][key]['title']
+ if len(title_list) > 0 and 'plain_text' in title_list[0]:
+ page_name = title_list[0]['plain_text']
page_icon = page_result['icon']
if page_icon:
icon_type = page_icon['type']
diff --git a/api/migrations/versions/2024_08_09_0801-1787fbae959a_update_tools_original_url_length.py b/api/migrations/versions/2024_08_09_0801-1787fbae959a_update_tools_original_url_length.py
new file mode 100644
index 00000000000000..db966252f1a63c
--- /dev/null
+++ b/api/migrations/versions/2024_08_09_0801-1787fbae959a_update_tools_original_url_length.py
@@ -0,0 +1,39 @@
+"""update tools original_url length
+
+Revision ID: 1787fbae959a
+Revises: eeb2e349e6ac
+Create Date: 2024-08-09 08:01:12.817620
+
+"""
+import sqlalchemy as sa
+from alembic import op
+
+import models as models
+
+# revision identifiers, used by Alembic.
+revision = '1787fbae959a'
+down_revision = 'eeb2e349e6ac'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ with op.batch_alter_table('tool_files', schema=None) as batch_op:
+ batch_op.alter_column('original_url',
+ existing_type=sa.VARCHAR(length=255),
+ type_=sa.String(length=2048),
+ existing_nullable=True)
+
+ # ### end Alembic commands ###
+
+
+def downgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ with op.batch_alter_table('tool_files', schema=None) as batch_op:
+ batch_op.alter_column('original_url',
+ existing_type=sa.String(length=2048),
+ type_=sa.VARCHAR(length=255),
+ existing_nullable=True)
+
+ # ### end Alembic commands ###
diff --git a/api/migrations/versions/2024_08_13_0633-63a83fcf12ba_support_conversation_variables.py b/api/migrations/versions/2024_08_13_0633-63a83fcf12ba_support_conversation_variables.py
new file mode 100644
index 00000000000000..16e1efd4efd4ed
--- /dev/null
+++ b/api/migrations/versions/2024_08_13_0633-63a83fcf12ba_support_conversation_variables.py
@@ -0,0 +1,51 @@
+"""support conversation variables
+
+Revision ID: 63a83fcf12ba
+Revises: 1787fbae959a
+Create Date: 2024-08-13 06:33:07.950379
+
+"""
+import sqlalchemy as sa
+from alembic import op
+
+import models as models
+
+# revision identifiers, used by Alembic.
+revision = '63a83fcf12ba'
+down_revision = '1787fbae959a'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.create_table('workflow__conversation_variables',
+ sa.Column('id', models.types.StringUUID(), nullable=False),
+ sa.Column('conversation_id', models.types.StringUUID(), nullable=False),
+ sa.Column('app_id', models.types.StringUUID(), nullable=False),
+ sa.Column('data', sa.Text(), nullable=False),
+ sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False),
+ sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),
+ sa.PrimaryKeyConstraint('id', 'conversation_id', name=op.f('workflow__conversation_variables_pkey'))
+ )
+ with op.batch_alter_table('workflow__conversation_variables', schema=None) as batch_op:
+ batch_op.create_index(batch_op.f('workflow__conversation_variables_app_id_idx'), ['app_id'], unique=False)
+ batch_op.create_index(batch_op.f('workflow__conversation_variables_created_at_idx'), ['created_at'], unique=False)
+
+ with op.batch_alter_table('workflows', schema=None) as batch_op:
+ batch_op.add_column(sa.Column('conversation_variables', sa.Text(), server_default='{}', nullable=False))
+
+ # ### end Alembic commands ###
+
+
+def downgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ with op.batch_alter_table('workflows', schema=None) as batch_op:
+ batch_op.drop_column('conversation_variables')
+
+ with op.batch_alter_table('workflow__conversation_variables', schema=None) as batch_op:
+ batch_op.drop_index(batch_op.f('workflow__conversation_variables_created_at_idx'))
+ batch_op.drop_index(batch_op.f('workflow__conversation_variables_app_id_idx'))
+
+ op.drop_table('workflow__conversation_variables')
+ # ### end Alembic commands ###
diff --git a/api/migrations/versions/2024_08_14_1354-8782057ff0dc_add_conversations_dialogue_count.py b/api/migrations/versions/2024_08_14_1354-8782057ff0dc_add_conversations_dialogue_count.py
new file mode 100644
index 00000000000000..eba78e2e77d5d8
--- /dev/null
+++ b/api/migrations/versions/2024_08_14_1354-8782057ff0dc_add_conversations_dialogue_count.py
@@ -0,0 +1,33 @@
+"""add conversations.dialogue_count
+
+Revision ID: 8782057ff0dc
+Revises: 63a83fcf12ba
+Create Date: 2024-08-14 13:54:25.161324
+
+"""
+import sqlalchemy as sa
+from alembic import op
+
+import models as models
+
+# revision identifiers, used by Alembic.
+revision = '8782057ff0dc'
+down_revision = '63a83fcf12ba'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ with op.batch_alter_table('conversations', schema=None) as batch_op:
+ batch_op.add_column(sa.Column('dialogue_count', sa.Integer(), server_default='0', nullable=False))
+
+ # ### end Alembic commands ###
+
+
+def downgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ with op.batch_alter_table('conversations', schema=None) as batch_op:
+ batch_op.drop_column('dialogue_count')
+
+ # ### end Alembic commands ###
diff --git a/api/models/__init__.py b/api/models/__init__.py
index 3b832cd22d8120..4012611471c337 100644
--- a/api/models/__init__.py
+++ b/api/models/__init__.py
@@ -1,15 +1,19 @@
from enum import Enum
-from sqlalchemy import CHAR, TypeDecorator
-from sqlalchemy.dialects.postgresql import UUID
+from .model import App, AppMode, Message
+from .types import StringUUID
+from .workflow import ConversationVariable, Workflow, WorkflowNodeExecutionStatus
+
+__all__ = ['ConversationVariable', 'StringUUID', 'AppMode', 'WorkflowNodeExecutionStatus', 'Workflow', 'App', 'Message']
class CreatedByRole(Enum):
"""
Enum class for createdByRole
"""
- ACCOUNT = "account"
- END_USER = "end_user"
+
+ ACCOUNT = 'account'
+ END_USER = 'end_user'
@classmethod
def value_of(cls, value: str) -> 'CreatedByRole':
@@ -23,49 +27,3 @@ def value_of(cls, value: str) -> 'CreatedByRole':
if role.value == value:
return role
raise ValueError(f'invalid createdByRole value {value}')
-
-
-class CreatedFrom(Enum):
- """
- Enum class for createdFrom
- """
- SERVICE_API = "service-api"
- WEB_APP = "web-app"
- EXPLORE = "explore"
-
- @classmethod
- def value_of(cls, value: str) -> 'CreatedFrom':
- """
- Get value of given mode.
-
- :param value: mode value
- :return: mode
- """
- for role in cls:
- if role.value == value:
- return role
- raise ValueError(f'invalid createdFrom value {value}')
-
-
-class StringUUID(TypeDecorator):
- impl = CHAR
- cache_ok = True
-
- def process_bind_param(self, value, dialect):
- if value is None:
- return value
- elif dialect.name == 'postgresql':
- return str(value)
- else:
- return value.hex
-
- def load_dialect_impl(self, dialect):
- if dialect.name == 'postgresql':
- return dialect.type_descriptor(UUID())
- else:
- return dialect.type_descriptor(CHAR(36))
-
- def process_result_value(self, value, dialect):
- if value is None:
- return value
- return str(value)
diff --git a/api/models/account.py b/api/models/account.py
index d36b2b9fda3278..67d940b7b7190e 100644
--- a/api/models/account.py
+++ b/api/models/account.py
@@ -4,7 +4,8 @@
from flask_login import UserMixin
from extensions.ext_database import db
-from models import StringUUID
+
+from .types import StringUUID
class AccountStatus(str, enum.Enum):
diff --git a/api/models/api_based_extension.py b/api/models/api_based_extension.py
index d1f9cd78a72e45..7f69323628a7cc 100644
--- a/api/models/api_based_extension.py
+++ b/api/models/api_based_extension.py
@@ -1,7 +1,8 @@
import enum
from extensions.ext_database import db
-from models import StringUUID
+
+from .types import StringUUID
class APIBasedExtensionPoint(enum.Enum):
diff --git a/api/models/dataset.py b/api/models/dataset.py
index 40f9f4cf83ae96..0d48177eb60409 100644
--- a/api/models/dataset.py
+++ b/api/models/dataset.py
@@ -16,9 +16,10 @@
from core.rag.retrieval.retrival_methods import RetrievalMethod
from extensions.ext_database import db
from extensions.ext_storage import storage
-from models import StringUUID
-from models.account import Account
-from models.model import App, Tag, TagBinding, UploadFile
+
+from .account import Account
+from .model import App, Tag, TagBinding, UploadFile
+from .types import StringUUID
class Dataset(db.Model):
diff --git a/api/models/model.py b/api/models/model.py
index a6f517ea6b181f..5426d3bc83e020 100644
--- a/api/models/model.py
+++ b/api/models/model.py
@@ -7,6 +7,7 @@
from flask import request
from flask_login import UserMixin
from sqlalchemy import Float, func, text
+from sqlalchemy.orm import Mapped, mapped_column
from configs import dify_config
from core.file.tool_file_parser import ToolFileParser
@@ -14,8 +15,8 @@
from extensions.ext_database import db
from libs.helper import generate_string
-from . import StringUUID
from .account import Account, Tenant
+from .types import StringUUID
class DifySetup(db.Model):
@@ -512,12 +513,12 @@ class Conversation(db.Model):
from_account_id = db.Column(StringUUID)
read_at = db.Column(db.DateTime)
read_account_id = db.Column(StringUUID)
+ dialogue_count: Mapped[int] = mapped_column(default=0)
created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
updated_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
messages = db.relationship("Message", backref="conversation", lazy='select', passive_deletes="all")
- message_annotations = db.relationship("MessageAnnotation", backref="conversation", lazy='select',
- passive_deletes="all")
+ message_annotations = db.relationship("MessageAnnotation", backref="conversation", lazy='select', passive_deletes="all")
is_deleted = db.Column(db.Boolean, nullable=False, server_default=db.text('false'))
@@ -1116,7 +1117,7 @@ def generate_code(n):
@property
def app_base_url(self):
return (
- dify_config.APP_WEB_URL if dify_config.APP_WEB_URL else request.host_url.rstrip('/'))
+ dify_config.APP_WEB_URL if dify_config.APP_WEB_URL else request.url_root.rstrip('/'))
class ApiToken(db.Model):
diff --git a/api/models/provider.py b/api/models/provider.py
index 4c14c33f095cee..5d92ee6eb60d18 100644
--- a/api/models/provider.py
+++ b/api/models/provider.py
@@ -1,7 +1,8 @@
from enum import Enum
from extensions.ext_database import db
-from models import StringUUID
+
+from .types import StringUUID
class ProviderType(Enum):
diff --git a/api/models/source.py b/api/models/source.py
index 265e68f014c6c2..adc00028bee43b 100644
--- a/api/models/source.py
+++ b/api/models/source.py
@@ -3,7 +3,8 @@
from sqlalchemy.dialects.postgresql import JSONB
from extensions.ext_database import db
-from models import StringUUID
+
+from .types import StringUUID
class DataSourceOauthBinding(db.Model):
diff --git a/api/models/tool.py b/api/models/tool.py
index f322944f5f0a8e..79a70c6b1f2d22 100644
--- a/api/models/tool.py
+++ b/api/models/tool.py
@@ -2,7 +2,8 @@
from enum import Enum
from extensions.ext_database import db
-from models import StringUUID
+
+from .types import StringUUID
class ToolProviderName(Enum):
diff --git a/api/models/tools.py b/api/models/tools.py
index 49212916ec5195..069dc5bad083c8 100644
--- a/api/models/tools.py
+++ b/api/models/tools.py
@@ -6,8 +6,9 @@
from core.tools.entities.tool_bundle import ApiToolBundle
from core.tools.entities.tool_entities import ApiProviderSchemaType, WorkflowToolParameterConfiguration
from extensions.ext_database import db
-from models import StringUUID
-from models.model import Account, App, Tenant
+
+from .model import Account, App, Tenant
+from .types import StringUUID
class BuiltinToolProvider(db.Model):
@@ -299,4 +300,4 @@ class ToolFile(db.Model):
# mime type
mimetype = db.Column(db.String(255), nullable=False)
# original url
- original_url = db.Column(db.String(255), nullable=True)
\ No newline at end of file
+ original_url = db.Column(db.String(2048), nullable=True)
\ No newline at end of file
diff --git a/api/models/types.py b/api/models/types.py
new file mode 100644
index 00000000000000..1614ec20188541
--- /dev/null
+++ b/api/models/types.py
@@ -0,0 +1,26 @@
+from sqlalchemy import CHAR, TypeDecorator
+from sqlalchemy.dialects.postgresql import UUID
+
+
+class StringUUID(TypeDecorator):
+ impl = CHAR
+ cache_ok = True
+
+ def process_bind_param(self, value, dialect):
+ if value is None:
+ return value
+ elif dialect.name == 'postgresql':
+ return str(value)
+ else:
+ return value.hex
+
+ def load_dialect_impl(self, dialect):
+ if dialect.name == 'postgresql':
+ return dialect.type_descriptor(UUID())
+ else:
+ return dialect.type_descriptor(CHAR(36))
+
+ def process_result_value(self, value, dialect):
+ if value is None:
+ return value
+ return str(value)
\ No newline at end of file
diff --git a/api/models/web.py b/api/models/web.py
index 6fd27206a972db..0e901d5f842691 100644
--- a/api/models/web.py
+++ b/api/models/web.py
@@ -1,7 +1,8 @@
from extensions.ext_database import db
-from models import StringUUID
-from models.model import Message
+
+from .model import Message
+from .types import StringUUID
class SavedMessage(db.Model):
diff --git a/api/models/workflow.py b/api/models/workflow.py
index df2269cd0fb6cc..759e07c7154e0d 100644
--- a/api/models/workflow.py
+++ b/api/models/workflow.py
@@ -3,18 +3,18 @@
from enum import Enum
from typing import Any, Optional, Union
+from sqlalchemy import func
+from sqlalchemy.orm import Mapped
+
import contexts
from constants import HIDDEN_VALUE
-from core.app.segments import (
- SecretVariable,
- Variable,
- factory,
-)
+from core.app.segments import SecretVariable, Variable, factory
from core.helper import encrypter
from extensions.ext_database import db
from libs import helper
-from models import StringUUID
-from models.account import Account
+
+from .account import Account
+from .types import StringUUID
class CreatedByRole(Enum):
@@ -122,6 +122,7 @@ class Workflow(db.Model):
updated_by = db.Column(StringUUID)
updated_at = db.Column(db.DateTime)
_environment_variables = db.Column('environment_variables', db.Text, nullable=False, server_default='{}')
+ _conversation_variables = db.Column('conversation_variables', db.Text, nullable=False, server_default='{}')
@property
def created_by_account(self):
@@ -249,9 +250,27 @@ def to_dict(self, *, include_secret: bool = False) -> Mapping[str, Any]:
'graph': self.graph_dict,
'features': self.features_dict,
'environment_variables': [var.model_dump(mode='json') for var in environment_variables],
+ 'conversation_variables': [var.model_dump(mode='json') for var in self.conversation_variables],
}
return result
+ @property
+ def conversation_variables(self) -> Sequence[Variable]:
+ # TODO: find some way to init `self._conversation_variables` when instance created.
+ if self._conversation_variables is None:
+ self._conversation_variables = '{}'
+
+ variables_dict: dict[str, Any] = json.loads(self._conversation_variables)
+ results = [factory.build_variable_from_mapping(v) for v in variables_dict.values()]
+ return results
+
+ @conversation_variables.setter
+ def conversation_variables(self, value: Sequence[Variable]) -> None:
+ self._conversation_variables = json.dumps(
+ {var.name: var.model_dump() for var in value},
+ ensure_ascii=False,
+ )
+
class WorkflowRunTriggeredFrom(Enum):
"""
@@ -702,3 +721,34 @@ def created_by_end_user(self):
created_by_role = CreatedByRole.value_of(self.created_by_role)
return db.session.get(EndUser, self.created_by) \
if created_by_role == CreatedByRole.END_USER else None
+
+
+class ConversationVariable(db.Model):
+ __tablename__ = 'workflow__conversation_variables'
+
+ id: Mapped[str] = db.Column(StringUUID, primary_key=True)
+ conversation_id: Mapped[str] = db.Column(StringUUID, nullable=False, primary_key=True)
+ app_id: Mapped[str] = db.Column(StringUUID, nullable=False, index=True)
+ data = db.Column(db.Text, nullable=False)
+ created_at = db.Column(db.DateTime, nullable=False, index=True, server_default=db.text('CURRENT_TIMESTAMP(0)'))
+ updated_at = db.Column(db.DateTime, nullable=False, server_default=func.current_timestamp(), onupdate=func.current_timestamp())
+
+ def __init__(self, *, id: str, app_id: str, conversation_id: str, data: str) -> None:
+ self.id = id
+ self.app_id = app_id
+ self.conversation_id = conversation_id
+ self.data = data
+
+ @classmethod
+ def from_variable(cls, *, app_id: str, conversation_id: str, variable: Variable) -> 'ConversationVariable':
+ obj = cls(
+ id=variable.id,
+ app_id=app_id,
+ conversation_id=conversation_id,
+ data=variable.model_dump_json(),
+ )
+ return obj
+
+ def to_variable(self) -> Variable:
+ mapping = json.loads(self.data)
+ return factory.build_variable_from_mapping(mapping)
diff --git a/api/poetry.lock b/api/poetry.lock
index 8a4f7584337376..358f9f8510c724 100644
--- a/api/poetry.lock
+++ b/api/poetry.lock
@@ -1,91 +1,103 @@
# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
+[[package]]
+name = "aiohappyeyeballs"
+version = "2.3.4"
+description = "Happy Eyeballs for asyncio"
+optional = false
+python-versions = "<4.0,>=3.8"
+files = [
+ {file = "aiohappyeyeballs-2.3.4-py3-none-any.whl", hash = "sha256:40a16ceffcf1fc9e142fd488123b2e218abc4188cf12ac20c67200e1579baa42"},
+ {file = "aiohappyeyeballs-2.3.4.tar.gz", hash = "sha256:7e1ae8399c320a8adec76f6c919ed5ceae6edd4c3672f4d9eae2b27e37c80ff6"},
+]
+
[[package]]
name = "aiohttp"
-version = "3.9.5"
+version = "3.10.1"
description = "Async http client/server framework (asyncio)"
optional = false
python-versions = ">=3.8"
files = [
- {file = "aiohttp-3.9.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:fcde4c397f673fdec23e6b05ebf8d4751314fa7c24f93334bf1f1364c1c69ac7"},
- {file = "aiohttp-3.9.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5d6b3f1fabe465e819aed2c421a6743d8debbde79b6a8600739300630a01bf2c"},
- {file = "aiohttp-3.9.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6ae79c1bc12c34082d92bf9422764f799aee4746fd7a392db46b7fd357d4a17a"},
- {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d3ebb9e1316ec74277d19c5f482f98cc65a73ccd5430540d6d11682cd857430"},
- {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84dabd95154f43a2ea80deffec9cb44d2e301e38a0c9d331cc4aa0166fe28ae3"},
- {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c8a02fbeca6f63cb1f0475c799679057fc9268b77075ab7cf3f1c600e81dd46b"},
- {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c26959ca7b75ff768e2776d8055bf9582a6267e24556bb7f7bd29e677932be72"},
- {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:714d4e5231fed4ba2762ed489b4aec07b2b9953cf4ee31e9871caac895a839c0"},
- {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e7a6a8354f1b62e15d48e04350f13e726fa08b62c3d7b8401c0a1314f02e3558"},
- {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:c413016880e03e69d166efb5a1a95d40f83d5a3a648d16486592c49ffb76d0db"},
- {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:ff84aeb864e0fac81f676be9f4685f0527b660f1efdc40dcede3c251ef1e867f"},
- {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:ad7f2919d7dac062f24d6f5fe95d401597fbb015a25771f85e692d043c9d7832"},
- {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:702e2c7c187c1a498a4e2b03155d52658fdd6fda882d3d7fbb891a5cf108bb10"},
- {file = "aiohttp-3.9.5-cp310-cp310-win32.whl", hash = "sha256:67c3119f5ddc7261d47163ed86d760ddf0e625cd6246b4ed852e82159617b5fb"},
- {file = "aiohttp-3.9.5-cp310-cp310-win_amd64.whl", hash = "sha256:471f0ef53ccedec9995287f02caf0c068732f026455f07db3f01a46e49d76bbb"},
- {file = "aiohttp-3.9.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e0ae53e33ee7476dd3d1132f932eeb39bf6125083820049d06edcdca4381f342"},
- {file = "aiohttp-3.9.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c088c4d70d21f8ca5c0b8b5403fe84a7bc8e024161febdd4ef04575ef35d474d"},
- {file = "aiohttp-3.9.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:639d0042b7670222f33b0028de6b4e2fad6451462ce7df2af8aee37dcac55424"},
- {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f26383adb94da5e7fb388d441bf09c61e5e35f455a3217bfd790c6b6bc64b2ee"},
- {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:66331d00fb28dc90aa606d9a54304af76b335ae204d1836f65797d6fe27f1ca2"},
- {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ff550491f5492ab5ed3533e76b8567f4b37bd2995e780a1f46bca2024223233"},
- {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f22eb3a6c1080d862befa0a89c380b4dafce29dc6cd56083f630073d102eb595"},
- {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a81b1143d42b66ffc40a441379387076243ef7b51019204fd3ec36b9f69e77d6"},
- {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f64fd07515dad67f24b6ea4a66ae2876c01031de91c93075b8093f07c0a2d93d"},
- {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:93e22add827447d2e26d67c9ac0161756007f152fdc5210277d00a85f6c92323"},
- {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:55b39c8684a46e56ef8c8d24faf02de4a2b2ac60d26cee93bc595651ff545de9"},
- {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4715a9b778f4293b9f8ae7a0a7cef9829f02ff8d6277a39d7f40565c737d3771"},
- {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:afc52b8d969eff14e069a710057d15ab9ac17cd4b6753042c407dcea0e40bf75"},
- {file = "aiohttp-3.9.5-cp311-cp311-win32.whl", hash = "sha256:b3df71da99c98534be076196791adca8819761f0bf6e08e07fd7da25127150d6"},
- {file = "aiohttp-3.9.5-cp311-cp311-win_amd64.whl", hash = "sha256:88e311d98cc0bf45b62fc46c66753a83445f5ab20038bcc1b8a1cc05666f428a"},
- {file = "aiohttp-3.9.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:c7a4b7a6cf5b6eb11e109a9755fd4fda7d57395f8c575e166d363b9fc3ec4678"},
- {file = "aiohttp-3.9.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:0a158704edf0abcac8ac371fbb54044f3270bdbc93e254a82b6c82be1ef08f3c"},
- {file = "aiohttp-3.9.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d153f652a687a8e95ad367a86a61e8d53d528b0530ef382ec5aaf533140ed00f"},
- {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82a6a97d9771cb48ae16979c3a3a9a18b600a8505b1115cfe354dfb2054468b4"},
- {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:60cdbd56f4cad9f69c35eaac0fbbdf1f77b0ff9456cebd4902f3dd1cf096464c"},
- {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8676e8fd73141ded15ea586de0b7cda1542960a7b9ad89b2b06428e97125d4fa"},
- {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da00da442a0e31f1c69d26d224e1efd3a1ca5bcbf210978a2ca7426dfcae9f58"},
- {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18f634d540dd099c262e9f887c8bbacc959847cfe5da7a0e2e1cf3f14dbf2daf"},
- {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:320e8618eda64e19d11bdb3bd04ccc0a816c17eaecb7e4945d01deee2a22f95f"},
- {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:2faa61a904b83142747fc6a6d7ad8fccff898c849123030f8e75d5d967fd4a81"},
- {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:8c64a6dc3fe5db7b1b4d2b5cb84c4f677768bdc340611eca673afb7cf416ef5a"},
- {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:393c7aba2b55559ef7ab791c94b44f7482a07bf7640d17b341b79081f5e5cd1a"},
- {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:c671dc117c2c21a1ca10c116cfcd6e3e44da7fcde37bf83b2be485ab377b25da"},
- {file = "aiohttp-3.9.5-cp312-cp312-win32.whl", hash = "sha256:5a7ee16aab26e76add4afc45e8f8206c95d1d75540f1039b84a03c3b3800dd59"},
- {file = "aiohttp-3.9.5-cp312-cp312-win_amd64.whl", hash = "sha256:5ca51eadbd67045396bc92a4345d1790b7301c14d1848feaac1d6a6c9289e888"},
- {file = "aiohttp-3.9.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:694d828b5c41255e54bc2dddb51a9f5150b4eefa9886e38b52605a05d96566e8"},
- {file = "aiohttp-3.9.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0605cc2c0088fcaae79f01c913a38611ad09ba68ff482402d3410bf59039bfb8"},
- {file = "aiohttp-3.9.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4558e5012ee03d2638c681e156461d37b7a113fe13970d438d95d10173d25f78"},
- {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9dbc053ac75ccc63dc3a3cc547b98c7258ec35a215a92bd9f983e0aac95d3d5b"},
- {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4109adee842b90671f1b689901b948f347325045c15f46b39797ae1bf17019de"},
- {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6ea1a5b409a85477fd8e5ee6ad8f0e40bf2844c270955e09360418cfd09abac"},
- {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3c2890ca8c59ee683fd09adf32321a40fe1cf164e3387799efb2acebf090c11"},
- {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3916c8692dbd9d55c523374a3b8213e628424d19116ac4308e434dbf6d95bbdd"},
- {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8d1964eb7617907c792ca00b341b5ec3e01ae8c280825deadbbd678447b127e1"},
- {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d5ab8e1f6bee051a4bf6195e38a5c13e5e161cb7bad83d8854524798bd9fcd6e"},
- {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:52c27110f3862a1afbcb2af4281fc9fdc40327fa286c4625dfee247c3ba90156"},
- {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:7f64cbd44443e80094309875d4f9c71d0401e966d191c3d469cde4642bc2e031"},
- {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8b4f72fbb66279624bfe83fd5eb6aea0022dad8eec62b71e7bf63ee1caadeafe"},
- {file = "aiohttp-3.9.5-cp38-cp38-win32.whl", hash = "sha256:6380c039ec52866c06d69b5c7aad5478b24ed11696f0e72f6b807cfb261453da"},
- {file = "aiohttp-3.9.5-cp38-cp38-win_amd64.whl", hash = "sha256:da22dab31d7180f8c3ac7c7635f3bcd53808f374f6aa333fe0b0b9e14b01f91a"},
- {file = "aiohttp-3.9.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:1732102949ff6087589408d76cd6dea656b93c896b011ecafff418c9661dc4ed"},
- {file = "aiohttp-3.9.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c6021d296318cb6f9414b48e6a439a7f5d1f665464da507e8ff640848ee2a58a"},
- {file = "aiohttp-3.9.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:239f975589a944eeb1bad26b8b140a59a3a320067fb3cd10b75c3092405a1372"},
- {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3b7b30258348082826d274504fbc7c849959f1989d86c29bc355107accec6cfb"},
- {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd2adf5c87ff6d8b277814a28a535b59e20bfea40a101db6b3bdca7e9926bc24"},
- {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e9a3d838441bebcf5cf442700e3963f58b5c33f015341f9ea86dcd7d503c07e2"},
- {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e3a1ae66e3d0c17cf65c08968a5ee3180c5a95920ec2731f53343fac9bad106"},
- {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9c69e77370cce2d6df5d12b4e12bdcca60c47ba13d1cbbc8645dd005a20b738b"},
- {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0cbf56238f4bbf49dab8c2dc2e6b1b68502b1e88d335bea59b3f5b9f4c001475"},
- {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:d1469f228cd9ffddd396d9948b8c9cd8022b6d1bf1e40c6f25b0fb90b4f893ed"},
- {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:45731330e754f5811c314901cebdf19dd776a44b31927fa4b4dbecab9e457b0c"},
- {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:3fcb4046d2904378e3aeea1df51f697b0467f2aac55d232c87ba162709478c46"},
- {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8cf142aa6c1a751fcb364158fd710b8a9be874b81889c2bd13aa8893197455e2"},
- {file = "aiohttp-3.9.5-cp39-cp39-win32.whl", hash = "sha256:7b179eea70833c8dee51ec42f3b4097bd6370892fa93f510f76762105568cf09"},
- {file = "aiohttp-3.9.5-cp39-cp39-win_amd64.whl", hash = "sha256:38d80498e2e169bc61418ff36170e0aad0cd268da8b38a17c4cf29d254a8b3f1"},
- {file = "aiohttp-3.9.5.tar.gz", hash = "sha256:edea7d15772ceeb29db4aff55e482d4bcfb6ae160ce144f2682de02f6d693551"},
-]
-
-[package.dependencies]
+ {file = "aiohttp-3.10.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:47b4c2412960e64d97258f40616efddaebcb34ff664c8a972119ed38fac2a62c"},
+ {file = "aiohttp-3.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e7dbf637f87dd315fa1f36aaed8afa929ee2c607454fb7791e74c88a0d94da59"},
+ {file = "aiohttp-3.10.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c8fb76214b5b739ce59e2236a6489d9dc3483649cfd6f563dbf5d8e40dbdd57d"},
+ {file = "aiohttp-3.10.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c577cdcf8f92862363b3d598d971c6a84ed8f0bf824d4cc1ce70c2fb02acb4a"},
+ {file = "aiohttp-3.10.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:777e23609899cb230ad2642b4bdf1008890f84968be78de29099a8a86f10b261"},
+ {file = "aiohttp-3.10.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b07286a1090483799599a2f72f76ac396993da31f6e08efedb59f40876c144fa"},
+ {file = "aiohttp-3.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b9db600a86414a9a653e3c1c7f6a2f6a1894ab8f83d11505247bd1b90ad57157"},
+ {file = "aiohttp-3.10.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:01c3f1eb280008e51965a8d160a108c333136f4a39d46f516c64d2aa2e6a53f2"},
+ {file = "aiohttp-3.10.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f5dd109a925fee4c9ac3f6a094900461a2712df41745f5d04782ebcbe6479ccb"},
+ {file = "aiohttp-3.10.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:8c81ff4afffef9b1186639506d70ea90888218f5ddfff03870e74ec80bb59970"},
+ {file = "aiohttp-3.10.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:2a384dfbe8bfebd203b778a30a712886d147c61943675f4719b56725a8bbe803"},
+ {file = "aiohttp-3.10.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:b9fb6508893dc31cfcbb8191ef35abd79751db1d6871b3e2caee83959b4d91eb"},
+ {file = "aiohttp-3.10.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:88596384c3bec644a96ae46287bb646d6a23fa6014afe3799156aef42669c6bd"},
+ {file = "aiohttp-3.10.1-cp310-cp310-win32.whl", hash = "sha256:68164d43c580c2e8bf8e0eb4960142919d304052ccab92be10250a3a33b53268"},
+ {file = "aiohttp-3.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:d6bbe2c90c10382ca96df33b56e2060404a4f0f88673e1e84b44c8952517e5f3"},
+ {file = "aiohttp-3.10.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f6979b4f20d3e557a867da9d9227de4c156fcdcb348a5848e3e6190fd7feb972"},
+ {file = "aiohttp-3.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:03c0c380c83f8a8d4416224aafb88d378376d6f4cadebb56b060688251055cd4"},
+ {file = "aiohttp-3.10.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1c2b104e81b3c3deba7e6f5bc1a9a0e9161c380530479970766a6655b8b77c7c"},
+ {file = "aiohttp-3.10.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b023b68c61ab0cd48bd38416b421464a62c381e32b9dc7b4bdfa2905807452a4"},
+ {file = "aiohttp-3.10.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1a07c76a82390506ca0eabf57c0540cf5a60c993c442928fe4928472c4c6e5e6"},
+ {file = "aiohttp-3.10.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:41d8dab8c64ded1edf117d2a64f353efa096c52b853ef461aebd49abae979f16"},
+ {file = "aiohttp-3.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:615348fab1a9ef7d0960a905e83ad39051ae9cb0d2837da739b5d3a7671e497a"},
+ {file = "aiohttp-3.10.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:256ee6044214ee9d66d531bb374f065ee94e60667d6bbeaa25ca111fc3997158"},
+ {file = "aiohttp-3.10.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:b7d5bb926805022508b7ddeaad957f1fce7a8d77532068d7bdb431056dc630cd"},
+ {file = "aiohttp-3.10.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:028faf71b338f069077af6315ad54281612705d68889f5d914318cbc2aab0d50"},
+ {file = "aiohttp-3.10.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:5c12310d153b27aa630750be44e79313acc4e864c421eb7d2bc6fa3429c41bf8"},
+ {file = "aiohttp-3.10.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:de1a91d5faded9054957ed0a9e01b9d632109341942fc123947ced358c5d9009"},
+ {file = "aiohttp-3.10.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9c186b270979fb1dee3ababe2d12fb243ed7da08b30abc83ebac3a928a4ddb15"},
+ {file = "aiohttp-3.10.1-cp311-cp311-win32.whl", hash = "sha256:4a9ce70f5e00380377aac0e568abd075266ff992be2e271765f7b35d228a990c"},
+ {file = "aiohttp-3.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:a77c79bac8d908d839d32c212aef2354d2246eb9deb3e2cb01ffa83fb7a6ea5d"},
+ {file = "aiohttp-3.10.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:2212296cdb63b092e295c3e4b4b442e7b7eb41e8a30d0f53c16d5962efed395d"},
+ {file = "aiohttp-3.10.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:4dcb127ca3eb0a61205818a606393cbb60d93b7afb9accd2fd1e9081cc533144"},
+ {file = "aiohttp-3.10.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cb8b79a65332e1a426ccb6290ce0409e1dc16b4daac1cc5761e059127fa3d134"},
+ {file = "aiohttp-3.10.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68cc24f707ed9cb961f6ee04020ca01de2c89b2811f3cf3361dc7c96a14bfbcc"},
+ {file = "aiohttp-3.10.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9cb54f5725b4b37af12edf6c9e834df59258c82c15a244daa521a065fbb11717"},
+ {file = "aiohttp-3.10.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:51d03e948e53b3639ce4d438f3d1d8202898ec6655cadcc09ec99229d4adc2a9"},
+ {file = "aiohttp-3.10.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:786299d719eb5d868f161aeec56d589396b053925b7e0ce36e983d30d0a3e55c"},
+ {file = "aiohttp-3.10.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:abda4009a30d51d3f06f36bc7411a62b3e647fa6cc935ef667e3e3d3a7dd09b1"},
+ {file = "aiohttp-3.10.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:67f7639424c313125213954e93a6229d3a1d386855d70c292a12628f600c7150"},
+ {file = "aiohttp-3.10.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:8e5a26d7aac4c0d8414a347da162696eea0629fdce939ada6aedf951abb1d745"},
+ {file = "aiohttp-3.10.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:120548d89f14b76a041088b582454d89389370632ee12bf39d919cc5c561d1ca"},
+ {file = "aiohttp-3.10.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:f5293726943bdcea24715b121d8c4ae12581441d22623b0e6ab12d07ce85f9c4"},
+ {file = "aiohttp-3.10.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1f8605e573ed6c44ec689d94544b2c4bb1390aaa723a8b5a2cc0a5a485987a68"},
+ {file = "aiohttp-3.10.1-cp312-cp312-win32.whl", hash = "sha256:e7168782621be4448d90169a60c8b37e9b0926b3b79b6097bc180c0a8a119e73"},
+ {file = "aiohttp-3.10.1-cp312-cp312-win_amd64.whl", hash = "sha256:8fbf8c0ded367c5c8eaf585f85ca8dd85ff4d5b73fb8fe1e6ac9e1b5e62e11f7"},
+ {file = "aiohttp-3.10.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:54b7f4a20d7cc6bfa4438abbde069d417bb7a119f870975f78a2b99890226d55"},
+ {file = "aiohttp-3.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2fa643ca990323db68911b92f3f7a0ca9ae300ae340d0235de87c523601e58d9"},
+ {file = "aiohttp-3.10.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d8311d0d690487359fe2247ec5d2cac9946e70d50dced8c01ce9e72341c21151"},
+ {file = "aiohttp-3.10.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:222821c60b8f6a64c5908cb43d69c0ee978a1188f6a8433d4757d39231b42cdb"},
+ {file = "aiohttp-3.10.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e7b55d9ede66af7feb6de87ff277e0ccf6d51c7db74cc39337fe3a0e31b5872d"},
+ {file = "aiohttp-3.10.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5a95151a5567b3b00368e99e9c5334a919514f60888a6b6d2054fea5e66e527e"},
+ {file = "aiohttp-3.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e9e9171d2fe6bfd9d3838a6fe63b1e91b55e0bf726c16edf265536e4eafed19"},
+ {file = "aiohttp-3.10.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a57e73f9523e980f6101dc9a83adcd7ac0006ea8bf7937ca3870391c7bb4f8ff"},
+ {file = "aiohttp-3.10.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:0df51a3d70a2bfbb9c921619f68d6d02591f24f10e9c76de6f3388c89ed01de6"},
+ {file = "aiohttp-3.10.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:b0de63ff0307eac3961b4af74382d30220d4813f36b7aaaf57f063a1243b4214"},
+ {file = "aiohttp-3.10.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:8db9b749f589b5af8e4993623dbda6716b2b7a5fcb0fa2277bf3ce4b278c7059"},
+ {file = "aiohttp-3.10.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:6b14c19172eb53b63931d3e62a9749d6519f7c121149493e6eefca055fcdb352"},
+ {file = "aiohttp-3.10.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:5cd57ad998e3038aa87c38fe85c99ed728001bf5dde8eca121cadee06ee3f637"},
+ {file = "aiohttp-3.10.1-cp38-cp38-win32.whl", hash = "sha256:df31641e3f02b77eb3c5fb63c0508bee0fc067cf153da0e002ebbb0db0b6d91a"},
+ {file = "aiohttp-3.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:93094eba50bc2ad4c40ff4997ead1fdcd41536116f2e7d6cfec9596a8ecb3615"},
+ {file = "aiohttp-3.10.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:440954ddc6b77257e67170d57b1026aa9545275c33312357472504eef7b4cc0b"},
+ {file = "aiohttp-3.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f9f8beed277488a52ee2b459b23c4135e54d6a819eaba2e120e57311015b58e9"},
+ {file = "aiohttp-3.10.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d8a8221a63602008550022aa3a4152ca357e1dde7ab3dd1da7e1925050b56863"},
+ {file = "aiohttp-3.10.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a702bd3663b5cbf3916e84bf332400d24cdb18399f0877ca6b313ce6c08bfb43"},
+ {file = "aiohttp-3.10.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1988b370536eb14f0ce7f3a4a5b422ab64c4e255b3f5d7752c5f583dc8c967fc"},
+ {file = "aiohttp-3.10.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7ccf1f0a304352c891d124ac1a9dea59b14b2abed1704aaa7689fc90ef9c5be1"},
+ {file = "aiohttp-3.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc3ea6ef2a83edad84bbdb5d96e22f587b67c68922cd7b6f9d8f24865e655bcf"},
+ {file = "aiohttp-3.10.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:89b47c125ab07f0831803b88aeb12b04c564d5f07a1c1a225d4eb4d2f26e8b5e"},
+ {file = "aiohttp-3.10.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:21778552ef3d44aac3278cc6f6d13a6423504fa5f09f2df34bfe489ed9ded7f5"},
+ {file = "aiohttp-3.10.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:bde0693073fd5e542e46ea100aa6c1a5d36282dbdbad85b1c3365d5421490a92"},
+ {file = "aiohttp-3.10.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:bf66149bb348d8e713f3a8e0b4f5b952094c2948c408e1cfef03b49e86745d60"},
+ {file = "aiohttp-3.10.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:587237571a85716d6f71f60d103416c9df7d5acb55d96d3d3ced65f39bff9c0c"},
+ {file = "aiohttp-3.10.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:bfe33cba6e127d0b5b417623c9aa621f0a69f304742acdca929a9fdab4593693"},
+ {file = "aiohttp-3.10.1-cp39-cp39-win32.whl", hash = "sha256:9fbff00646cf8211b330690eb2fd64b23e1ce5b63a342436c1d1d6951d53d8dd"},
+ {file = "aiohttp-3.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:5951c328f9ac42d7bce7a6ded535879bc9ae13032818d036749631fa27777905"},
+ {file = "aiohttp-3.10.1.tar.gz", hash = "sha256:8b0d058e4e425d3b45e8ec70d49b402f4d6b21041e674798b1f91ba027c73f28"},
+]
+
+[package.dependencies]
+aiohappyeyeballs = ">=2.3.0"
aiosignal = ">=1.1.2"
async-timeout = {version = ">=4.0,<5.0", markers = "python_version < \"3.11\""}
attrs = ">=17.3.0"
@@ -94,7 +106,7 @@ multidict = ">=4.5,<7.0"
yarl = ">=1.0,<2.0"
[package.extras]
-speedups = ["Brotli", "aiodns", "brotlicffi"]
+speedups = ["Brotli", "aiodns (>=3.2.0)", "brotlicffi"]
[[package]]
name = "aiohttp-retry"
@@ -145,16 +157,16 @@ tz = ["backports.zoneinfo"]
[[package]]
name = "alibabacloud-credentials"
-version = "0.3.4"
+version = "0.3.5"
description = "The alibabacloud credentials module of alibabaCloud Python SDK."
optional = false
python-versions = ">=3.6"
files = [
- {file = "alibabacloud_credentials-0.3.4.tar.gz", hash = "sha256:c15a34fe782c318d4cf24cb041a0385ac4ccd2548e524e5d7fe1cff56a9a6acc"},
+ {file = "alibabacloud_credentials-0.3.5.tar.gz", hash = "sha256:ad065ec95921eaf51939195485d0e5cc9e0ea050282059c7d8bf74bdb5496177"},
]
[package.dependencies]
-alibabacloud-tea = "*"
+alibabacloud-tea = ">=0.3.9"
[[package]]
name = "alibabacloud-endpoint-util"
@@ -171,16 +183,16 @@ alibabacloud-tea = ">=0.0.1"
[[package]]
name = "alibabacloud-gateway-spi"
-version = "0.0.1"
+version = "0.0.2"
description = "Alibaba Cloud Gateway SPI SDK Library for Python"
optional = false
python-versions = ">=3.6"
files = [
- {file = "alibabacloud_gateway_spi-0.0.1.tar.gz", hash = "sha256:1b259855708afc3c04d8711d8530c63f7645e1edc0cf97e2fd15461b08e11c30"},
+ {file = "alibabacloud_gateway_spi-0.0.2.tar.gz", hash = "sha256:f932c8ba67291531dfbee6ca521dcf3523eb4ff93512bf0aaf135f2d4fc4704d"},
]
[package.dependencies]
-alibabacloud_credentials = ">=0.2.0,<1.0.0"
+alibabacloud_credentials = ">=0.3.4,<1.0.0"
[[package]]
name = "alibabacloud-gpdb20160503"
@@ -294,19 +306,19 @@ alibabacloud-tea = ">=0.0.1"
[[package]]
name = "alibabacloud-tea-openapi"
-version = "0.3.10"
+version = "0.3.11"
description = "Alibaba Cloud openapi SDK Library for Python"
optional = false
python-versions = ">=3.6"
files = [
- {file = "alibabacloud_tea_openapi-0.3.10.tar.gz", hash = "sha256:46e9c54ea857346306cd5c628dc33479349b559179ed2fdb2251dbe6ec9a1cf1"},
+ {file = "alibabacloud_tea_openapi-0.3.11.tar.gz", hash = "sha256:3f5cace1b1aeb8a64587574097403cfd066b86ee4c3c9abde587f9abfcad38de"},
]
[package.dependencies]
alibabacloud_credentials = ">=0.3.1,<1.0.0"
alibabacloud_gateway_spi = ">=0.0.1,<1.0.0"
alibabacloud_openapi_util = ">=0.2.1,<1.0.0"
-alibabacloud_tea_util = ">=0.3.12,<1.0.0"
+alibabacloud_tea_util = ">=0.3.13,<1.0.0"
alibabacloud_tea_xml = ">=0.0.2,<1.0.0"
[[package]]
@@ -493,22 +505,22 @@ files = [
[[package]]
name = "attrs"
-version = "23.2.0"
+version = "24.2.0"
description = "Classes Without Boilerplate"
optional = false
python-versions = ">=3.7"
files = [
- {file = "attrs-23.2.0-py3-none-any.whl", hash = "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1"},
- {file = "attrs-23.2.0.tar.gz", hash = "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30"},
+ {file = "attrs-24.2.0-py3-none-any.whl", hash = "sha256:81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2"},
+ {file = "attrs-24.2.0.tar.gz", hash = "sha256:5cfb1b9148b5b086569baec03f20d7b6bf3bcacc9a42bebf87ffaaca362f6346"},
]
[package.extras]
-cov = ["attrs[tests]", "coverage[toml] (>=5.3)"]
-dev = ["attrs[tests]", "pre-commit"]
-docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"]
-tests = ["attrs[tests-no-zope]", "zope-interface"]
-tests-mypy = ["mypy (>=1.6)", "pytest-mypy-plugins"]
-tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"]
+benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier (<24.7)"]
+tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"]
[[package]]
name = "authlib"
@@ -669,17 +681,17 @@ files = [
[[package]]
name = "boto3"
-version = "1.34.136"
+version = "1.34.148"
description = "The AWS SDK for Python"
optional = false
python-versions = ">=3.8"
files = [
- {file = "boto3-1.34.136-py3-none-any.whl", hash = "sha256:d41037e2c680ab8d6c61a0a4ee6bf1fdd9e857f43996672830a95d62d6f6fa79"},
- {file = "boto3-1.34.136.tar.gz", hash = "sha256:0314e6598f59ee0f34eb4e6d1a0f69fa65c146d2b88a6e837a527a9956ec2731"},
+ {file = "boto3-1.34.148-py3-none-any.whl", hash = "sha256:d63d36e5a34533ba69188d56f96da132730d5e9932c4e11c02d79319cd1afcec"},
+ {file = "boto3-1.34.148.tar.gz", hash = "sha256:2058397f0a92c301e3116e9e65fbbc70ea49270c250882d65043d19b7c6e2d17"},
]
[package.dependencies]
-botocore = ">=1.34.136,<1.35.0"
+botocore = ">=1.34.148,<1.35.0"
jmespath = ">=0.7.1,<2.0.0"
s3transfer = ">=0.10.0,<0.11.0"
@@ -688,13 +700,13 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"]
[[package]]
name = "botocore"
-version = "1.34.147"
+version = "1.34.155"
description = "Low-level, data-driven core of boto 3."
optional = false
python-versions = ">=3.8"
files = [
- {file = "botocore-1.34.147-py3-none-any.whl", hash = "sha256:be94a2f4874b1d1705cae2bd512c475047497379651678593acb6c61c50d91de"},
- {file = "botocore-1.34.147.tar.gz", hash = "sha256:2e8f000b77e4ca345146cb2edab6403769a517b564f627bb084ab335417f3dbe"},
+ {file = "botocore-1.34.155-py3-none-any.whl", hash = "sha256:f2696c11bb0cad627d42512937befd2e3f966aedd15de00d90ee13cf7a16b328"},
+ {file = "botocore-1.34.155.tar.gz", hash = "sha256:3aa88abfef23909f68d3e6679a3d4b4bb3c6288a6cfbf9e253aa68dac8edad64"},
]
[package.dependencies]
@@ -703,7 +715,7 @@ python-dateutil = ">=2.1,<3.0.0"
urllib3 = {version = ">=1.25.4,<2.2.0 || >2.2.0,<3", markers = "python_version >= \"3.10\""}
[package.extras]
-crt = ["awscrt (==0.20.11)"]
+crt = ["awscrt (==0.21.2)"]
[[package]]
name = "bottleneck"
@@ -1011,63 +1023,78 @@ files = [
[[package]]
name = "cffi"
-version = "1.16.0"
+version = "1.17.0"
description = "Foreign Function Interface for Python calling C code."
optional = false
python-versions = ">=3.8"
files = [
- {file = "cffi-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088"},
- {file = "cffi-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9"},
- {file = "cffi-1.16.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673"},
- {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896"},
- {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684"},
- {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7"},
- {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614"},
- {file = "cffi-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743"},
- {file = "cffi-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d"},
- {file = "cffi-1.16.0-cp310-cp310-win32.whl", hash = "sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a"},
- {file = "cffi-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1"},
- {file = "cffi-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404"},
- {file = "cffi-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417"},
- {file = "cffi-1.16.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627"},
- {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936"},
- {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d"},
- {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56"},
- {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e"},
- {file = "cffi-1.16.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc"},
- {file = "cffi-1.16.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb"},
- {file = "cffi-1.16.0-cp311-cp311-win32.whl", hash = "sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab"},
- {file = "cffi-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba"},
- {file = "cffi-1.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956"},
- {file = "cffi-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e"},
- {file = "cffi-1.16.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e"},
- {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2"},
- {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357"},
- {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6"},
- {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969"},
- {file = "cffi-1.16.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520"},
- {file = "cffi-1.16.0-cp312-cp312-win32.whl", hash = "sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b"},
- {file = "cffi-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235"},
- {file = "cffi-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc"},
- {file = "cffi-1.16.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0"},
- {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b"},
- {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c"},
- {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b"},
- {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324"},
- {file = "cffi-1.16.0-cp38-cp38-win32.whl", hash = "sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a"},
- {file = "cffi-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36"},
- {file = "cffi-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed"},
- {file = "cffi-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2"},
- {file = "cffi-1.16.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872"},
- {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8"},
- {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f"},
- {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4"},
- {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098"},
- {file = "cffi-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000"},
- {file = "cffi-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe"},
- {file = "cffi-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4"},
- {file = "cffi-1.16.0-cp39-cp39-win_amd64.whl", hash = "sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8"},
- {file = "cffi-1.16.0.tar.gz", hash = "sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0"},
+ {file = "cffi-1.17.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f9338cc05451f1942d0d8203ec2c346c830f8e86469903d5126c1f0a13a2bcbb"},
+ {file = "cffi-1.17.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a0ce71725cacc9ebf839630772b07eeec220cbb5f03be1399e0457a1464f8e1a"},
+ {file = "cffi-1.17.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c815270206f983309915a6844fe994b2fa47e5d05c4c4cef267c3b30e34dbe42"},
+ {file = "cffi-1.17.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d6bdcd415ba87846fd317bee0774e412e8792832e7805938987e4ede1d13046d"},
+ {file = "cffi-1.17.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8a98748ed1a1df4ee1d6f927e151ed6c1a09d5ec21684de879c7ea6aa96f58f2"},
+ {file = "cffi-1.17.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0a048d4f6630113e54bb4b77e315e1ba32a5a31512c31a273807d0027a7e69ab"},
+ {file = "cffi-1.17.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24aa705a5f5bd3a8bcfa4d123f03413de5d86e497435693b638cbffb7d5d8a1b"},
+ {file = "cffi-1.17.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:856bf0924d24e7f93b8aee12a3a1095c34085600aa805693fb7f5d1962393206"},
+ {file = "cffi-1.17.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:4304d4416ff032ed50ad6bb87416d802e67139e31c0bde4628f36a47a3164bfa"},
+ {file = "cffi-1.17.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:331ad15c39c9fe9186ceaf87203a9ecf5ae0ba2538c9e898e3a6967e8ad3db6f"},
+ {file = "cffi-1.17.0-cp310-cp310-win32.whl", hash = "sha256:669b29a9eca6146465cc574659058ed949748f0809a2582d1f1a324eb91054dc"},
+ {file = "cffi-1.17.0-cp310-cp310-win_amd64.whl", hash = "sha256:48b389b1fd5144603d61d752afd7167dfd205973a43151ae5045b35793232aa2"},
+ {file = "cffi-1.17.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c5d97162c196ce54af6700949ddf9409e9833ef1003b4741c2b39ef46f1d9720"},
+ {file = "cffi-1.17.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5ba5c243f4004c750836f81606a9fcb7841f8874ad8f3bf204ff5e56332b72b9"},
+ {file = "cffi-1.17.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bb9333f58fc3a2296fb1d54576138d4cf5d496a2cc118422bd77835e6ae0b9cb"},
+ {file = "cffi-1.17.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:435a22d00ec7d7ea533db494da8581b05977f9c37338c80bc86314bec2619424"},
+ {file = "cffi-1.17.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d1df34588123fcc88c872f5acb6f74ae59e9d182a2707097f9e28275ec26a12d"},
+ {file = "cffi-1.17.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:df8bb0010fdd0a743b7542589223a2816bdde4d94bb5ad67884348fa2c1c67e8"},
+ {file = "cffi-1.17.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8b5b9712783415695663bd463990e2f00c6750562e6ad1d28e072a611c5f2a6"},
+ {file = "cffi-1.17.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ffef8fd58a36fb5f1196919638f73dd3ae0db1a878982b27a9a5a176ede4ba91"},
+ {file = "cffi-1.17.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4e67d26532bfd8b7f7c05d5a766d6f437b362c1bf203a3a5ce3593a645e870b8"},
+ {file = "cffi-1.17.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:45f7cd36186db767d803b1473b3c659d57a23b5fa491ad83c6d40f2af58e4dbb"},
+ {file = "cffi-1.17.0-cp311-cp311-win32.whl", hash = "sha256:a9015f5b8af1bb6837a3fcb0cdf3b874fe3385ff6274e8b7925d81ccaec3c5c9"},
+ {file = "cffi-1.17.0-cp311-cp311-win_amd64.whl", hash = "sha256:b50aaac7d05c2c26dfd50c3321199f019ba76bb650e346a6ef3616306eed67b0"},
+ {file = "cffi-1.17.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aec510255ce690d240f7cb23d7114f6b351c733a74c279a84def763660a2c3bc"},
+ {file = "cffi-1.17.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2770bb0d5e3cc0e31e7318db06efcbcdb7b31bcb1a70086d3177692a02256f59"},
+ {file = "cffi-1.17.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:db9a30ec064129d605d0f1aedc93e00894b9334ec74ba9c6bdd08147434b33eb"},
+ {file = "cffi-1.17.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a47eef975d2b8b721775a0fa286f50eab535b9d56c70a6e62842134cf7841195"},
+ {file = "cffi-1.17.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f3e0992f23bbb0be00a921eae5363329253c3b86287db27092461c887b791e5e"},
+ {file = "cffi-1.17.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6107e445faf057c118d5050560695e46d272e5301feffda3c41849641222a828"},
+ {file = "cffi-1.17.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb862356ee9391dc5a0b3cbc00f416b48c1b9a52d252d898e5b7696a5f9fe150"},
+ {file = "cffi-1.17.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c1c13185b90bbd3f8b5963cd8ce7ad4ff441924c31e23c975cb150e27c2bf67a"},
+ {file = "cffi-1.17.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:17c6d6d3260c7f2d94f657e6872591fe8733872a86ed1345bda872cfc8c74885"},
+ {file = "cffi-1.17.0-cp312-cp312-win32.whl", hash = "sha256:c3b8bd3133cd50f6b637bb4322822c94c5ce4bf0d724ed5ae70afce62187c492"},
+ {file = "cffi-1.17.0-cp312-cp312-win_amd64.whl", hash = "sha256:dca802c8db0720ce1c49cce1149ff7b06e91ba15fa84b1d59144fef1a1bc7ac2"},
+ {file = "cffi-1.17.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:6ce01337d23884b21c03869d2f68c5523d43174d4fc405490eb0091057943118"},
+ {file = "cffi-1.17.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cab2eba3830bf4f6d91e2d6718e0e1c14a2f5ad1af68a89d24ace0c6b17cced7"},
+ {file = "cffi-1.17.0-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:14b9cbc8f7ac98a739558eb86fabc283d4d564dafed50216e7f7ee62d0d25377"},
+ {file = "cffi-1.17.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b00e7bcd71caa0282cbe3c90966f738e2db91e64092a877c3ff7f19a1628fdcb"},
+ {file = "cffi-1.17.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:41f4915e09218744d8bae14759f983e466ab69b178de38066f7579892ff2a555"},
+ {file = "cffi-1.17.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e4760a68cab57bfaa628938e9c2971137e05ce48e762a9cb53b76c9b569f1204"},
+ {file = "cffi-1.17.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:011aff3524d578a9412c8b3cfaa50f2c0bd78e03eb7af7aa5e0df59b158efb2f"},
+ {file = "cffi-1.17.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:a003ac9edc22d99ae1286b0875c460351f4e101f8c9d9d2576e78d7e048f64e0"},
+ {file = "cffi-1.17.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ef9528915df81b8f4c7612b19b8628214c65c9b7f74db2e34a646a0a2a0da2d4"},
+ {file = "cffi-1.17.0-cp313-cp313-win32.whl", hash = "sha256:70d2aa9fb00cf52034feac4b913181a6e10356019b18ef89bc7c12a283bf5f5a"},
+ {file = "cffi-1.17.0-cp313-cp313-win_amd64.whl", hash = "sha256:b7b6ea9e36d32582cda3465f54c4b454f62f23cb083ebc7a94e2ca6ef011c3a7"},
+ {file = "cffi-1.17.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:964823b2fc77b55355999ade496c54dde161c621cb1f6eac61dc30ed1b63cd4c"},
+ {file = "cffi-1.17.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:516a405f174fd3b88829eabfe4bb296ac602d6a0f68e0d64d5ac9456194a5b7e"},
+ {file = "cffi-1.17.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dec6b307ce928e8e112a6bb9921a1cb00a0e14979bf28b98e084a4b8a742bd9b"},
+ {file = "cffi-1.17.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e4094c7b464cf0a858e75cd14b03509e84789abf7b79f8537e6a72152109c76e"},
+ {file = "cffi-1.17.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2404f3de742f47cb62d023f0ba7c5a916c9c653d5b368cc966382ae4e57da401"},
+ {file = "cffi-1.17.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3aa9d43b02a0c681f0bfbc12d476d47b2b2b6a3f9287f11ee42989a268a1833c"},
+ {file = "cffi-1.17.0-cp38-cp38-win32.whl", hash = "sha256:0bb15e7acf8ab35ca8b24b90af52c8b391690ef5c4aec3d31f38f0d37d2cc499"},
+ {file = "cffi-1.17.0-cp38-cp38-win_amd64.whl", hash = "sha256:93a7350f6706b31f457c1457d3a3259ff9071a66f312ae64dc024f049055f72c"},
+ {file = "cffi-1.17.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1a2ddbac59dc3716bc79f27906c010406155031a1c801410f1bafff17ea304d2"},
+ {file = "cffi-1.17.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6327b572f5770293fc062a7ec04160e89741e8552bf1c358d1a23eba68166759"},
+ {file = "cffi-1.17.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dbc183e7bef690c9abe5ea67b7b60fdbca81aa8da43468287dae7b5c046107d4"},
+ {file = "cffi-1.17.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5bdc0f1f610d067c70aa3737ed06e2726fd9d6f7bfee4a351f4c40b6831f4e82"},
+ {file = "cffi-1.17.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6d872186c1617d143969defeadac5a904e6e374183e07977eedef9c07c8953bf"},
+ {file = "cffi-1.17.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0d46ee4764b88b91f16661a8befc6bfb24806d885e27436fdc292ed7e6f6d058"},
+ {file = "cffi-1.17.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f76a90c345796c01d85e6332e81cab6d70de83b829cf1d9762d0a3da59c7932"},
+ {file = "cffi-1.17.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0e60821d312f99d3e1569202518dddf10ae547e799d75aef3bca3a2d9e8ee693"},
+ {file = "cffi-1.17.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:eb09b82377233b902d4c3fbeeb7ad731cdab579c6c6fda1f763cd779139e47c3"},
+ {file = "cffi-1.17.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:24658baf6224d8f280e827f0a50c46ad819ec8ba380a42448e24459daf809cf4"},
+ {file = "cffi-1.17.0-cp39-cp39-win32.whl", hash = "sha256:0fdacad9e0d9fc23e519efd5ea24a70348305e8d7d85ecbb1a5fa66dc834e7fb"},
+ {file = "cffi-1.17.0-cp39-cp39-win_amd64.whl", hash = "sha256:7cbc78dc018596315d4e7841c8c3a7ae31cc4d638c9b627f87d52e8abaaf2d29"},
+ {file = "cffi-1.17.0.tar.gz", hash = "sha256:f3157624b7558b914cb039fd1af735e5e8049a87c817cc215109ad1c8779df76"},
]
[package.dependencies]
@@ -1343,77 +1370,77 @@ testing = ["pytest (>=7.2.1)", "pytest-cov (>=4.0.0)", "tox (>=4.4.3)"]
[[package]]
name = "clickhouse-connect"
-version = "0.7.16"
+version = "0.7.18"
description = "ClickHouse Database Core Driver for Python, Pandas, and Superset"
optional = false
python-versions = "~=3.8"
files = [
- {file = "clickhouse-connect-0.7.16.tar.gz", hash = "sha256:253a2089efad5729903d00382f73fa8da2cbbfdb118db498cf708ee9f4a2134f"},
- {file = "clickhouse_connect-0.7.16-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:00413deb9e086aabf661d18ac3a3539f25eb773c3675f49353e0d7e6ef1205fc"},
- {file = "clickhouse_connect-0.7.16-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:faadaf206ea7753782db017daedbf592e4edc7c71cb985aad787eb9dc516bf21"},
- {file = "clickhouse_connect-0.7.16-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1db8f1168f33fda78adddb733913b211ddf648984d8fef8d934e30df876e5f23"},
- {file = "clickhouse_connect-0.7.16-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8fa630bf50fb064cc53b7ea5d862066476d3c6074003f6d39d2594fb1a7abf67"},
- {file = "clickhouse_connect-0.7.16-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2cba9547dad41b2d333458615208a3c7db6f56a63473ffea2c05c44225ffa020"},
- {file = "clickhouse_connect-0.7.16-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:480f7856fcf42a21f17886e0b42d70499067c865fc2a0ea7c0eb5c0bdca281a8"},
- {file = "clickhouse_connect-0.7.16-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:b65f3eb570cbcf9fa383b4e0925d1ceb3efd3deba42a435625cad75b3a9ff7f3"},
- {file = "clickhouse_connect-0.7.16-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b78d3cc0fe42374bb9d5a05ba71578dc69f7e4b4c771e86dcf292ae0412265cc"},
- {file = "clickhouse_connect-0.7.16-cp310-cp310-win32.whl", hash = "sha256:1cb76b26fcde1ba6a8ae68e1db1f9e42d458879a0d4d2c9843cc998f42f445ac"},
- {file = "clickhouse_connect-0.7.16-cp310-cp310-win_amd64.whl", hash = "sha256:9298b344168271e952ea41021963ca1b81b9b3c38be8b036cb64a2556edbb4b7"},
- {file = "clickhouse_connect-0.7.16-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8ae39a765735cc6e786e5f9a0dba799e7f8ee0bbd5dfc5d5ff755dfa9dd13855"},
- {file = "clickhouse_connect-0.7.16-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3f32546f65dd234a49310cda454713a5f7fbc8ba978744e070355c7ea8819a5a"},
- {file = "clickhouse_connect-0.7.16-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20865c81a5b378625a528ac8960e08cdca316147f87fad6deb9f16c0d5e5f62f"},
- {file = "clickhouse_connect-0.7.16-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:609c076261d779703bf29e7a27dafc8283153403ceab1ec23d50eb2acabc4b9d"},
- {file = "clickhouse_connect-0.7.16-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e07862e75ac7419c5671384055f11ca5e76dc2c0be4a6f3aed7bf419997184bc"},
- {file = "clickhouse_connect-0.7.16-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d5db7da6f20b9a49b288063de9b3224a56634f8cb94d19d435af518ed81872c3"},
- {file = "clickhouse_connect-0.7.16-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:955c567ede68a10325045bb2adf1314ff569dfb7e52f6074c18182f3803279f6"},
- {file = "clickhouse_connect-0.7.16-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:df517bfe23d85f5aeeb17b262c06d0a5c24e0baea09688a96d02dc8589ef8b07"},
- {file = "clickhouse_connect-0.7.16-cp311-cp311-win32.whl", hash = "sha256:7f2c6132fc90df6a8318abb9f257c2b777404908b7d168ac08235d516f65a663"},
- {file = "clickhouse_connect-0.7.16-cp311-cp311-win_amd64.whl", hash = "sha256:ca1dba53da86691a11671d846988dc4f6ad02a66f5a0df9a87a46dc4ec9bb0a1"},
- {file = "clickhouse_connect-0.7.16-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f8f7260073b6ee63e19d442ebb6954bc7741a5ce4ed563eb8074c8c6a0158eca"},
- {file = "clickhouse_connect-0.7.16-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9b3dd93ada1099cb6df244d79973c811e90a4590685e78e60e8846914b3c261e"},
- {file = "clickhouse_connect-0.7.16-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d3c3458bce25fe9c10e1dbf82dbeeeb2f04e382130f9811cc3bedf44c2028ca"},
- {file = "clickhouse_connect-0.7.16-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcc302390b4ea975efd8d2ca53d295d40dc766179dd5e9fc158e808f01d9280d"},
- {file = "clickhouse_connect-0.7.16-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a94f6d095d7174c55825e0b5c04b77897a1b2a8a8bbb38f3f773fd3113a7be27"},
- {file = "clickhouse_connect-0.7.16-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:6b7e2572993ef2e1dee5012875a7a2d08cede319e32ccdd2db90ed26a0d0c037"},
- {file = "clickhouse_connect-0.7.16-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:e9c35ee425309ed8ef63bae31e1d3c5f35706fa27ae2836e61e7cb9bbe7f00cb"},
- {file = "clickhouse_connect-0.7.16-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:eb0471d5a32d07eaa37772871ee9e6b5eb37ab907c3c154833824ed68ee4795b"},
- {file = "clickhouse_connect-0.7.16-cp312-cp312-win32.whl", hash = "sha256:b531ee18b4ce16f1d2b8f6249859cbd600f7e0f312f80dda8deb969791a90f17"},
- {file = "clickhouse_connect-0.7.16-cp312-cp312-win_amd64.whl", hash = "sha256:38392308344770864843f7f8b914799684c13ce4b272d5a3a55e5512ff8a3ae0"},
- {file = "clickhouse_connect-0.7.16-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:052ca80d66e49c94d103c9842d2a5b0ebf4610981b79164660ef6b1bdc4b5e85"},
- {file = "clickhouse_connect-0.7.16-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b496059d145c68e956aa10cd04e5c7cb4e97312eb3f7829cec8f4f7024f8ced6"},
- {file = "clickhouse_connect-0.7.16-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:de1e423fc9c415b9fdcbb6f23eccae981e3f0f0cf142e518efec709bda7c1394"},
- {file = "clickhouse_connect-0.7.16-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:555c64719cbc72675d58ea6dfc144fa8064ea1d673a54afd2d54e34c58f17c6b"},
- {file = "clickhouse_connect-0.7.16-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a0c3c063ab23df8f71a36505880bf5de6c18aee246938d787447e52b4d9d5531"},
- {file = "clickhouse_connect-0.7.16-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5ed62e08cfe445d0430b91c26fb276e2a5175e456e9786594fb6e67c9ebd8c6c"},
- {file = "clickhouse_connect-0.7.16-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d9eb056bd14ca3c1d7e3edd7ca79ea970d45e5e536930dbb6179aeb965d5bc3d"},
- {file = "clickhouse_connect-0.7.16-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:54e0a03b685ee6c138954846dafb6ec0e0baf8257f2587c61e34c017f3dc9d63"},
- {file = "clickhouse_connect-0.7.16-cp38-cp38-win32.whl", hash = "sha256:d8402c3145387726bd19f916ca2890576be70c4493f030c068f6f03a75addff7"},
- {file = "clickhouse_connect-0.7.16-cp38-cp38-win_amd64.whl", hash = "sha256:70e376d2ebc0f092fae35f7b50ff7296ee8ffd2dda3536238f6c39a5c949d115"},
- {file = "clickhouse_connect-0.7.16-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cee4f91ad22401c3b96f5df3f3149ef2894e7c2d00b5abd9da80119e7b6592f7"},
- {file = "clickhouse_connect-0.7.16-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a3009145f35e9ac2535dbd8fdbdc218abfe0971c9bc9b730eb5c3f6c40faeb5f"},
- {file = "clickhouse_connect-0.7.16-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7d0ef9f877ffbcb0f526ce9c35c657fc54930d043e45c077d9d886c0f1add727"},
- {file = "clickhouse_connect-0.7.16-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:acc437b3ff2f7991b209b861a89c003ac1971c890775190178438780e967a9d3"},
- {file = "clickhouse_connect-0.7.16-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ed836dcee4ac097bd83714abe0af987b1ef767675a555e7643d793164c3f1cc"},
- {file = "clickhouse_connect-0.7.16-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4c4e0d173239c0b4594c8703fae5c8ba3241c4e0763a8cf436b94564692671f9"},
- {file = "clickhouse_connect-0.7.16-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:a17a348dd8c00df343a01128497e8c3a6ae431f13c7a88e363ac12c035316ce0"},
- {file = "clickhouse_connect-0.7.16-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:805ae7ad39c043af13e2b5af45abb70330f0907749dc87ad4a2481a4ac209cc6"},
- {file = "clickhouse_connect-0.7.16-cp39-cp39-win32.whl", hash = "sha256:38fc6ca1bd73cf4dcebd22fbb8dceda267908ff674fc57fbc23c3b5df9c21ac1"},
- {file = "clickhouse_connect-0.7.16-cp39-cp39-win_amd64.whl", hash = "sha256:3dc67e99e40b5a8bc493a21016830b0f3800006a6038c1fd881f7cae6246cc44"},
- {file = "clickhouse_connect-0.7.16-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:b7f526fef71bd5265f47915340a6369a5b5685278b72b5aff281cc521a8ec376"},
- {file = "clickhouse_connect-0.7.16-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e00f87ba68bbc63dd32d7a304fd629b759f24b09f88fbc2bac0a9ed1fe7b2938"},
- {file = "clickhouse_connect-0.7.16-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:09c84f3b64d6bebedcfbbd19e8369b3df2cb7d313afb2a0d64a3e151d344c1c1"},
- {file = "clickhouse_connect-0.7.16-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18d104ab78edee26e8cef056e2db83f03e1da918df0946e1ef1ad9a27a024dd0"},
- {file = "clickhouse_connect-0.7.16-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:cc1ad53e282ff5b4288fdfcf6df72cda542d9d997de5889d66a1f8e2b9f477f0"},
- {file = "clickhouse_connect-0.7.16-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:fddc99322054f5d3df8715ab3724bd36ac636f8ceaed4f5f3f60d377abd22d22"},
- {file = "clickhouse_connect-0.7.16-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:765a2de98197d1b4f6424611ceaca2ae896a1d7093b943403973888cb7c144e6"},
- {file = "clickhouse_connect-0.7.16-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1540e0a93e5f2147400f644606a399c91705066f05d5a91429616ee9812f4521"},
- {file = "clickhouse_connect-0.7.16-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ba928c4178b0d4a513e1b0ad32a464ab56cb1bc27736a7f41b32e4eb70eb08d6"},
- {file = "clickhouse_connect-0.7.16-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:a17ffc22e905081f002173b30959089de6987fd40c87e7794da9d978d723e610"},
- {file = "clickhouse_connect-0.7.16-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:26df09787232b495285d8358db145b9770f472e2e30147912634c5b56392e73f"},
- {file = "clickhouse_connect-0.7.16-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a2a3ce33241441dc7c718c19e31645323e6c5da793d46bbb670fd4e8557b8605"},
- {file = "clickhouse_connect-0.7.16-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:29f9dc9cc1f4ec4a333bf119abb5cee13563e89bc990d4d77b8f43cf630e9fb1"},
- {file = "clickhouse_connect-0.7.16-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a16a7ada11996a6fa0959c83e2e46ff32773e57eca40eff86176fd62a30054ca"},
- {file = "clickhouse_connect-0.7.16-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:ead20e1d4f3c5493dd075b7dc81b5d21be4b876aca6952e1c155824876c621f3"},
+ {file = "clickhouse-connect-0.7.18.tar.gz", hash = "sha256:516aba1fdcf58973b0d0d90168a60c49f6892b6db1183b932f80ae057994eadb"},
+ {file = "clickhouse_connect-0.7.18-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:43e712b8fada717160153022314473826adffde00e8cbe8068e0aa1c187c2395"},
+ {file = "clickhouse_connect-0.7.18-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0a21244d24c9b2a7d1ea2cf23f254884113e0f6d9950340369ce154d7d377165"},
+ {file = "clickhouse_connect-0.7.18-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:347b19f3674b57906dea94dd0e8b72aaedc822131cc2a2383526b19933ed7a33"},
+ {file = "clickhouse_connect-0.7.18-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23c5aa1b144491211f662ed26f279845fb367c37d49b681b783ca4f8c51c7891"},
+ {file = "clickhouse_connect-0.7.18-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e99b4271ed08cc59162a6025086f1786ded5b8a29f4c38e2d3b2a58af04f85f5"},
+ {file = "clickhouse_connect-0.7.18-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:27d76d1dbe988350567dab7fbcc0a54cdd25abedc5585326c753974349818694"},
+ {file = "clickhouse_connect-0.7.18-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:d2cd40b4e07df277192ab6bcb187b3f61e0074ad0e256908bf443b3080be4a6c"},
+ {file = "clickhouse_connect-0.7.18-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8f4ae2c4fb66b2b49f2e7f893fe730712a61a068e79f7272e60d4dd7d64df260"},
+ {file = "clickhouse_connect-0.7.18-cp310-cp310-win32.whl", hash = "sha256:ed871195b25a4e1acfd37f59527ceb872096f0cd65d76af8c91f581c033b1cc0"},
+ {file = "clickhouse_connect-0.7.18-cp310-cp310-win_amd64.whl", hash = "sha256:0c4989012e434b9c167bddf9298ca6eb076593e48a2cab7347cd70a446a7b5d3"},
+ {file = "clickhouse_connect-0.7.18-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:52cfcd77fc63561e7b51940e32900c13731513d703d7fc54a3a6eb1fa4f7be4e"},
+ {file = "clickhouse_connect-0.7.18-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:71d7bb9a24b0eacf8963044d6a1dd9e86dfcdd30afe1bd4a581c00910c83895a"},
+ {file = "clickhouse_connect-0.7.18-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:395cfe09d1d39be4206fc1da96fe316f270077791f9758fcac44fd2765446dba"},
+ {file = "clickhouse_connect-0.7.18-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac55b2b2eb068b02cbb1afbfc8b2255734e28a646d633c43a023a9b95e08023b"},
+ {file = "clickhouse_connect-0.7.18-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4d59bb1df3814acb321f0fe87a4a6eea658463d5e59f6dc8ae10072df1205591"},
+ {file = "clickhouse_connect-0.7.18-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:da5ea738641a7ad0ab7a8e1d8d6234639ea1e61c6eac970bbc6b94547d2c2fa7"},
+ {file = "clickhouse_connect-0.7.18-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:72eb32a75026401777e34209694ffe64db0ce610475436647ed45589b4ab4efe"},
+ {file = "clickhouse_connect-0.7.18-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:43bdd638b1ff27649d0ed9ed5000a8b8d754be891a8d279b27c72c03e3d12dcb"},
+ {file = "clickhouse_connect-0.7.18-cp311-cp311-win32.whl", hash = "sha256:f45bdcba1dc84a1f60a8d827310f615ecbc322518c2d36bba7bf878631007152"},
+ {file = "clickhouse_connect-0.7.18-cp311-cp311-win_amd64.whl", hash = "sha256:6df629ab4b646a49a74e791e14a1b6a73ccbe6c4ee25f864522588d376b66279"},
+ {file = "clickhouse_connect-0.7.18-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:32a35e1e63e4ae708432cbe29c8d116518d2d7b9ecb575b912444c3078b20e20"},
+ {file = "clickhouse_connect-0.7.18-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:357529b8c08305ab895cdc898b60a3dc9b36637dfa4dbfedfc1d00548fc88edc"},
+ {file = "clickhouse_connect-0.7.18-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2aa124d2bb65e29443779723e52398e8724e4bf56db94c9a93fd8208b9d6e2bf"},
+ {file = "clickhouse_connect-0.7.18-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e3646254607e38294e20bf2e20b780b1c3141fb246366a1ad2021531f2c9c1b"},
+ {file = "clickhouse_connect-0.7.18-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:433e50309af9d46d1b52e5b93ea105332565558be35296c7555c9c2753687586"},
+ {file = "clickhouse_connect-0.7.18-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:251e67753909f76f8b136cad734501e0daf5977ed62747e18baa2b187f41c92c"},
+ {file = "clickhouse_connect-0.7.18-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:a9980916495da3ed057e56ce2c922fc23de614ea5d74ed470b8450b58902ccee"},
+ {file = "clickhouse_connect-0.7.18-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:555e00660c04a524ea00409f783265ccd0d0192552eb9d4dc10d2aeaf2fa6575"},
+ {file = "clickhouse_connect-0.7.18-cp312-cp312-win32.whl", hash = "sha256:f4770c100f0608511f7e572b63a6b222fb780fc67341c11746d361c2b03d36d3"},
+ {file = "clickhouse_connect-0.7.18-cp312-cp312-win_amd64.whl", hash = "sha256:fd44a7885d992410668d083ba38d6a268a1567f49709300b4ff84eb6aef63b70"},
+ {file = "clickhouse_connect-0.7.18-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9ac122dcabe1a9d3c14d331fade70a0adc78cf4006c8b91ee721942cdaa1190e"},
+ {file = "clickhouse_connect-0.7.18-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1e89db8e8cc9187f2e9cd6aa32062f67b3b4de7b21b8703f103e89d659eda736"},
+ {file = "clickhouse_connect-0.7.18-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c34bb25e5ab9a97a4154d43fdcd16751c9aa4a6e6f959016e4c5fe5b692728ed"},
+ {file = "clickhouse_connect-0.7.18-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:929441a6689a78c63c6a05ee7eb39a183601d93714835ebd537c0572101f7ab1"},
+ {file = "clickhouse_connect-0.7.18-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e8852df54b04361e57775d8ae571cd87e6983f7ed968890c62bbba6a2f2c88fd"},
+ {file = "clickhouse_connect-0.7.18-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:56333eb772591162627455e2c21c8541ed628a9c6e7c115193ad00f24fc59440"},
+ {file = "clickhouse_connect-0.7.18-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ac6633d2996100552d2ae47ac5e4eb551e11f69d05637ea84f1e13ac0f2bc21a"},
+ {file = "clickhouse_connect-0.7.18-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:265085ab548fb49981fe2aef9f46652ee24d5583bf12e652abb13ee2d7e77581"},
+ {file = "clickhouse_connect-0.7.18-cp38-cp38-win32.whl", hash = "sha256:5ee6c1f74df5fb19b341c389cfed7535fb627cbb9cb1a9bdcbda85045b86cd49"},
+ {file = "clickhouse_connect-0.7.18-cp38-cp38-win_amd64.whl", hash = "sha256:c7a28f810775ce68577181e752ecd2dc8caae77f288b6b9f6a7ce4d36657d4fb"},
+ {file = "clickhouse_connect-0.7.18-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:67f9a3953693b609ab068071be5ac9521193f728b29057e913b386582f84b0c2"},
+ {file = "clickhouse_connect-0.7.18-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:77e202b8606096769bf45e68b46e6bb8c78c2c451c29cb9b3a7bf505b4060d44"},
+ {file = "clickhouse_connect-0.7.18-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8abcbd17f243ca8399a06fb08970d68e73d1ad671f84bb38518449248093f655"},
+ {file = "clickhouse_connect-0.7.18-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:192605c2a9412e4c7d4baab85e432a58a0a5520615f05bc14f13c2836cfc6eeb"},
+ {file = "clickhouse_connect-0.7.18-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c17108b190ab34645ee1981440ae129ecd7ca0cb6a93b4e5ce3ffc383355243f"},
+ {file = "clickhouse_connect-0.7.18-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ac1be43360a6e602784eb60547a03a6c2c574744cb8982ec15aac0e0e57709bd"},
+ {file = "clickhouse_connect-0.7.18-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:cf403781d4ffd5a47aa7eff591940df182de4d9c423cfdc7eb6ade1a1b100e22"},
+ {file = "clickhouse_connect-0.7.18-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:937c6481ec083e2a0bcf178ea363b72d437ab0c8fcbe65143db64b12c1e077c0"},
+ {file = "clickhouse_connect-0.7.18-cp39-cp39-win32.whl", hash = "sha256:77635fea4b3fc4b1568a32674f04d35f4e648e3180528a9bb776e46e76090e4a"},
+ {file = "clickhouse_connect-0.7.18-cp39-cp39-win_amd64.whl", hash = "sha256:5ef60eb76be54b6d6bd8f189b076939e2cca16b50b92b763e7a9c7a62b488045"},
+ {file = "clickhouse_connect-0.7.18-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:7bf76743d7b92b6cac6b4ef2e7a4c2d030ecf2fd542fcfccb374b2432b8d1027"},
+ {file = "clickhouse_connect-0.7.18-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:65b344f174d63096eec098137b5d9c3bb545d67dd174966246c4aa80f9c0bc1e"},
+ {file = "clickhouse_connect-0.7.18-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24dcc19338cd540e6a3e32e8a7c72c5fc4930c0dd5a760f76af9d384b3e57ddc"},
+ {file = "clickhouse_connect-0.7.18-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:31f5e42d5fd4eaab616926bae344c17202950d9d9c04716d46bccce6b31dbb73"},
+ {file = "clickhouse_connect-0.7.18-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a890421403c7a59ef85e3afc4ff0d641c5553c52fbb9d6ce30c0a0554649fac6"},
+ {file = "clickhouse_connect-0.7.18-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d61de71d2b82446dd66ade1b925270366c36a2b11779d5d1bcf71b1bfdd161e6"},
+ {file = "clickhouse_connect-0.7.18-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e81c4f2172e8d6f3dc4dd64ff2dc426920c0caeed969b4ec5bdd0b2fad1533e4"},
+ {file = "clickhouse_connect-0.7.18-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:092cb8e8acdcccce01d239760405fbd8c266052def49b13ad0a96814f5e521ca"},
+ {file = "clickhouse_connect-0.7.18-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a1ae8b1bab7f06815abf9d833a66849faa2b9dfadcc5728fd14c494e2879afa8"},
+ {file = "clickhouse_connect-0.7.18-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:e08ebec4db83109024c97ca2d25740bf57915160d7676edd5c4390777c3e3ec0"},
+ {file = "clickhouse_connect-0.7.18-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:e5e42ec23b59597b512b994fec68ac1c2fa6def8594848cc3ae2459cf5e9d76a"},
+ {file = "clickhouse_connect-0.7.18-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1aad4543a1ae4d40dc815ef85031a1809fe101687380d516383b168a7407ab2"},
+ {file = "clickhouse_connect-0.7.18-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:46cb4c604bd696535b1e091efb8047b833ff4220d31dbd95558c3587fda533a7"},
+ {file = "clickhouse_connect-0.7.18-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:05e1ef335b81bf6b5908767c3b55e842f1f8463742992653551796eeb8f2d7d6"},
+ {file = "clickhouse_connect-0.7.18-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:094e089de4a50a170f5fd1c0ebb2ea357e055266220bb11dfd7ddf2d4e9c9123"},
]
[package.dependencies]
@@ -1970,26 +1997,6 @@ files = [
{file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"},
]
-[[package]]
-name = "dnspython"
-version = "2.6.1"
-description = "DNS toolkit"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "dnspython-2.6.1-py3-none-any.whl", hash = "sha256:5ef3b9680161f6fa89daf8ad451b5f1a33b18ae8a1c6778cdf4b43f08c0a6e50"},
- {file = "dnspython-2.6.1.tar.gz", hash = "sha256:e8f0f9c23a7b7cb99ded64e6c3a6f3e701d78f50c55e002b839dea7225cff7cc"},
-]
-
-[package.extras]
-dev = ["black (>=23.1.0)", "coverage (>=7.0)", "flake8 (>=7)", "mypy (>=1.8)", "pylint (>=3)", "pytest (>=7.4)", "pytest-cov (>=4.1.0)", "sphinx (>=7.2.0)", "twine (>=4.0.0)", "wheel (>=0.42.0)"]
-dnssec = ["cryptography (>=41)"]
-doh = ["h2 (>=4.1.0)", "httpcore (>=1.0.0)", "httpx (>=0.26.0)"]
-doq = ["aioquic (>=0.9.25)"]
-idna = ["idna (>=3.6)"]
-trio = ["trio (>=0.23)"]
-wmi = ["wmi (>=1.5.1)"]
-
[[package]]
name = "docstring-parser"
version = "0.16"
@@ -2094,19 +2101,42 @@ dev = ["mypy (>=1.11.0)", "pytest (>=8.3.1)", "pytest-asyncio (>=0.23.8)", "ruff
lxml = ["lxml (>=5.2.2)"]
[[package]]
-name = "email-validator"
-version = "2.2.0"
-description = "A robust email address syntax and deliverability validation library."
+name = "elastic-transport"
+version = "8.15.0"
+description = "Transport classes and utilities shared among Python Elastic client libraries"
optional = false
python-versions = ">=3.8"
files = [
- {file = "email_validator-2.2.0-py3-none-any.whl", hash = "sha256:561977c2d73ce3611850a06fa56b414621e0c8faa9d66f2611407d87465da631"},
- {file = "email_validator-2.2.0.tar.gz", hash = "sha256:cb690f344c617a714f22e66ae771445a1ceb46821152df8e165c5f9a364582b7"},
+ {file = "elastic_transport-8.15.0-py3-none-any.whl", hash = "sha256:d7080d1dada2b4eee69e7574f9c17a76b42f2895eff428e562f94b0360e158c0"},
+ {file = "elastic_transport-8.15.0.tar.gz", hash = "sha256:85d62558f9baafb0868c801233a59b235e61d7b4804c28c2fadaa866b6766233"},
+]
+
+[package.dependencies]
+certifi = "*"
+urllib3 = ">=1.26.2,<3"
+
+[package.extras]
+develop = ["aiohttp", "furo", "httpx", "opentelemetry-api", "opentelemetry-sdk", "orjson", "pytest", "pytest-asyncio", "pytest-cov", "pytest-httpserver", "pytest-mock", "requests", "respx", "sphinx (>2)", "sphinx-autodoc-typehints", "trustme"]
+
+[[package]]
+name = "elasticsearch"
+version = "8.14.0"
+description = "Python client for Elasticsearch"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "elasticsearch-8.14.0-py3-none-any.whl", hash = "sha256:cef8ef70a81af027f3da74a4f7d9296b390c636903088439087b8262a468c130"},
+ {file = "elasticsearch-8.14.0.tar.gz", hash = "sha256:aa2490029dd96f4015b333c1827aa21fd6c0a4d223b00dfb0fe933b8d09a511b"},
]
[package.dependencies]
-dnspython = ">=2.0.0"
-idna = ">=2.0.0"
+elastic-transport = ">=8.13,<9"
+
+[package.extras]
+async = ["aiohttp (>=3,<4)"]
+orjson = ["orjson (>=3)"]
+requests = ["requests (>=2.4.0,!=2.32.2,<3.0.0)"]
+vectorstore-mmr = ["numpy (>=1)", "simsimd (>=3)"]
[[package]]
name = "emoji"
@@ -2173,45 +2203,23 @@ test = ["pytest (>=6)"]
[[package]]
name = "fastapi"
-version = "0.111.1"
+version = "0.112.0"
description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production"
optional = false
python-versions = ">=3.8"
files = [
- {file = "fastapi-0.111.1-py3-none-any.whl", hash = "sha256:4f51cfa25d72f9fbc3280832e84b32494cf186f50158d364a8765aabf22587bf"},
- {file = "fastapi-0.111.1.tar.gz", hash = "sha256:ddd1ac34cb1f76c2e2d7f8545a4bcb5463bce4834e81abf0b189e0c359ab2413"},
+ {file = "fastapi-0.112.0-py3-none-any.whl", hash = "sha256:3487ded9778006a45834b8c816ec4a48d522e2631ca9e75ec5a774f1b052f821"},
+ {file = "fastapi-0.112.0.tar.gz", hash = "sha256:d262bc56b7d101d1f4e8fc0ad2ac75bb9935fec504d2b7117686cec50710cf05"},
]
[package.dependencies]
-email_validator = ">=2.0.0"
-fastapi-cli = ">=0.0.2"
-httpx = ">=0.23.0"
-jinja2 = ">=2.11.2"
pydantic = ">=1.7.4,<1.8 || >1.8,<1.8.1 || >1.8.1,<2.0.0 || >2.0.0,<2.0.1 || >2.0.1,<2.1.0 || >2.1.0,<3.0.0"
-python-multipart = ">=0.0.7"
starlette = ">=0.37.2,<0.38.0"
typing-extensions = ">=4.8.0"
-uvicorn = {version = ">=0.12.0", extras = ["standard"]}
[package.extras]
-all = ["email_validator (>=2.0.0)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=2.11.2)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.7)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"]
-
-[[package]]
-name = "fastapi-cli"
-version = "0.0.4"
-description = "Run and manage FastAPI apps from the command line with FastAPI CLI. 🚀"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "fastapi_cli-0.0.4-py3-none-any.whl", hash = "sha256:a2552f3a7ae64058cdbb530be6fa6dbfc975dc165e4fa66d224c3d396e25e809"},
- {file = "fastapi_cli-0.0.4.tar.gz", hash = "sha256:e2e9ffaffc1f7767f488d6da34b6f5a377751c996f397902eb6abb99a67bde32"},
-]
-
-[package.dependencies]
-typer = ">=0.12.3"
-
-[package.extras]
-standard = ["fastapi", "uvicorn[standard] (>=0.15.0)"]
+all = ["email_validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.5)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=2.11.2)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.7)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"]
+standard = ["email_validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.5)", "httpx (>=0.23.0)", "jinja2 (>=2.11.2)", "python-multipart (>=0.0.7)", "uvicorn[standard] (>=0.12.0)"]
[[package]]
name = "fastavro"
@@ -3023,13 +3031,13 @@ grpc = ["grpcio (>=1.38.0,<2.0dev)", "grpcio-status (>=1.38.0,<2.0.dev0)"]
[[package]]
name = "google-cloud-resource-manager"
-version = "1.12.4"
+version = "1.12.5"
description = "Google Cloud Resource Manager API client library"
optional = false
python-versions = ">=3.7"
files = [
- {file = "google-cloud-resource-manager-1.12.4.tar.gz", hash = "sha256:3eda914a925e92465ef80faaab7e0f7a9312d486dd4e123d2c76e04bac688ff0"},
- {file = "google_cloud_resource_manager-1.12.4-py2.py3-none-any.whl", hash = "sha256:0b6663585f7f862166c0fb4c55fdda721fce4dc2dc1d5b52d03ee4bf2653a85f"},
+ {file = "google_cloud_resource_manager-1.12.5-py2.py3-none-any.whl", hash = "sha256:2708a718b45c79464b7b21559c701b5c92e6b0b1ab2146d0a256277a623dc175"},
+ {file = "google_cloud_resource_manager-1.12.5.tar.gz", hash = "sha256:b7af4254401ed4efa3aba3a929cb3ddb803fa6baf91a78485e45583597de5891"},
]
[package.dependencies]
@@ -3289,151 +3297,137 @@ protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4
[[package]]
name = "grpcio"
-version = "1.62.2"
+version = "1.63.0"
description = "HTTP/2-based RPC framework"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "grpcio-1.62.2-cp310-cp310-linux_armv7l.whl", hash = "sha256:66344ea741124c38588a664237ac2fa16dfd226964cca23ddc96bd4accccbde5"},
- {file = "grpcio-1.62.2-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:5dab7ac2c1e7cb6179c6bfad6b63174851102cbe0682294e6b1d6f0981ad7138"},
- {file = "grpcio-1.62.2-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:3ad00f3f0718894749d5a8bb0fa125a7980a2f49523731a9b1fabf2b3522aa43"},
- {file = "grpcio-1.62.2-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e72ddfee62430ea80133d2cbe788e0d06b12f865765cb24a40009668bd8ea05"},
- {file = "grpcio-1.62.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:53d3a59a10af4c2558a8e563aed9f256259d2992ae0d3037817b2155f0341de1"},
- {file = "grpcio-1.62.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a1511a303f8074f67af4119275b4f954189e8313541da7b88b1b3a71425cdb10"},
- {file = "grpcio-1.62.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b94d41b7412ef149743fbc3178e59d95228a7064c5ab4760ae82b562bdffb199"},
- {file = "grpcio-1.62.2-cp310-cp310-win32.whl", hash = "sha256:a75af2fc7cb1fe25785be7bed1ab18cef959a376cdae7c6870184307614caa3f"},
- {file = "grpcio-1.62.2-cp310-cp310-win_amd64.whl", hash = "sha256:80407bc007754f108dc2061e37480238b0dc1952c855e86a4fc283501ee6bb5d"},
- {file = "grpcio-1.62.2-cp311-cp311-linux_armv7l.whl", hash = "sha256:c1624aa686d4b36790ed1c2e2306cc3498778dffaf7b8dd47066cf819028c3ad"},
- {file = "grpcio-1.62.2-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:1c1bb80299bdef33309dff03932264636450c8fdb142ea39f47e06a7153d3063"},
- {file = "grpcio-1.62.2-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:db068bbc9b1fa16479a82e1ecf172a93874540cb84be69f0b9cb9b7ac3c82670"},
- {file = "grpcio-1.62.2-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e2cc8a308780edbe2c4913d6a49dbdb5befacdf72d489a368566be44cadaef1a"},
- {file = "grpcio-1.62.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d0695ae31a89f1a8fc8256050329a91a9995b549a88619263a594ca31b76d756"},
- {file = "grpcio-1.62.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:88b4f9ee77191dcdd8810241e89340a12cbe050be3e0d5f2f091c15571cd3930"},
- {file = "grpcio-1.62.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2a0204532aa2f1afd467024b02b4069246320405bc18abec7babab03e2644e75"},
- {file = "grpcio-1.62.2-cp311-cp311-win32.whl", hash = "sha256:6e784f60e575a0de554ef9251cbc2ceb8790914fe324f11e28450047f264ee6f"},
- {file = "grpcio-1.62.2-cp311-cp311-win_amd64.whl", hash = "sha256:112eaa7865dd9e6d7c0556c8b04ae3c3a2dc35d62ad3373ab7f6a562d8199200"},
- {file = "grpcio-1.62.2-cp312-cp312-linux_armv7l.whl", hash = "sha256:65034473fc09628a02fb85f26e73885cf1ed39ebd9cf270247b38689ff5942c5"},
- {file = "grpcio-1.62.2-cp312-cp312-macosx_10_10_universal2.whl", hash = "sha256:d2c1771d0ee3cf72d69bb5e82c6a82f27fbd504c8c782575eddb7839729fbaad"},
- {file = "grpcio-1.62.2-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:3abe6838196da518863b5d549938ce3159d809218936851b395b09cad9b5d64a"},
- {file = "grpcio-1.62.2-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c5ffeb269f10cedb4f33142b89a061acda9f672fd1357331dbfd043422c94e9e"},
- {file = "grpcio-1.62.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:404d3b4b6b142b99ba1cff0b2177d26b623101ea2ce51c25ef6e53d9d0d87bcc"},
- {file = "grpcio-1.62.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:262cda97efdabb20853d3b5a4c546a535347c14b64c017f628ca0cc7fa780cc6"},
- {file = "grpcio-1.62.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:17708db5b11b966373e21519c4c73e5a750555f02fde82276ea2a267077c68ad"},
- {file = "grpcio-1.62.2-cp312-cp312-win32.whl", hash = "sha256:b7ec9e2f8ffc8436f6b642a10019fc513722858f295f7efc28de135d336ac189"},
- {file = "grpcio-1.62.2-cp312-cp312-win_amd64.whl", hash = "sha256:aa787b83a3cd5e482e5c79be030e2b4a122ecc6c5c6c4c42a023a2b581fdf17b"},
- {file = "grpcio-1.62.2-cp37-cp37m-linux_armv7l.whl", hash = "sha256:cfd23ad29bfa13fd4188433b0e250f84ec2c8ba66b14a9877e8bce05b524cf54"},
- {file = "grpcio-1.62.2-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:af15e9efa4d776dfcecd1d083f3ccfb04f876d613e90ef8432432efbeeac689d"},
- {file = "grpcio-1.62.2-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:f4aa94361bb5141a45ca9187464ae81a92a2a135ce2800b2203134f7a1a1d479"},
- {file = "grpcio-1.62.2-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82af3613a219512a28ee5c95578eb38d44dd03bca02fd918aa05603c41018051"},
- {file = "grpcio-1.62.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:55ddaf53474e8caeb29eb03e3202f9d827ad3110475a21245f3c7712022882a9"},
- {file = "grpcio-1.62.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c79b518c56dddeec79e5500a53d8a4db90da995dfe1738c3ac57fe46348be049"},
- {file = "grpcio-1.62.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:a5eb4844e5e60bf2c446ef38c5b40d7752c6effdee882f716eb57ae87255d20a"},
- {file = "grpcio-1.62.2-cp37-cp37m-win_amd64.whl", hash = "sha256:aaae70364a2d1fb238afd6cc9fcb10442b66e397fd559d3f0968d28cc3ac929c"},
- {file = "grpcio-1.62.2-cp38-cp38-linux_armv7l.whl", hash = "sha256:1bcfe5070e4406f489e39325b76caeadab28c32bf9252d3ae960c79935a4cc36"},
- {file = "grpcio-1.62.2-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:da6a7b6b938c15fa0f0568e482efaae9c3af31963eec2da4ff13a6d8ec2888e4"},
- {file = "grpcio-1.62.2-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:41955b641c34db7d84db8d306937b72bc4968eef1c401bea73081a8d6c3d8033"},
- {file = "grpcio-1.62.2-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c772f225483905f675cb36a025969eef9712f4698364ecd3a63093760deea1bc"},
- {file = "grpcio-1.62.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07ce1f775d37ca18c7a141300e5b71539690efa1f51fe17f812ca85b5e73262f"},
- {file = "grpcio-1.62.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:26f415f40f4a93579fd648f48dca1c13dfacdfd0290f4a30f9b9aeb745026811"},
- {file = "grpcio-1.62.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:db707e3685ff16fc1eccad68527d072ac8bdd2e390f6daa97bc394ea7de4acea"},
- {file = "grpcio-1.62.2-cp38-cp38-win32.whl", hash = "sha256:589ea8e75de5fd6df387de53af6c9189c5231e212b9aa306b6b0d4f07520fbb9"},
- {file = "grpcio-1.62.2-cp38-cp38-win_amd64.whl", hash = "sha256:3c3ed41f4d7a3aabf0f01ecc70d6b5d00ce1800d4af652a549de3f7cf35c4abd"},
- {file = "grpcio-1.62.2-cp39-cp39-linux_armv7l.whl", hash = "sha256:162ccf61499c893831b8437120600290a99c0bc1ce7b51f2c8d21ec87ff6af8b"},
- {file = "grpcio-1.62.2-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:f27246d7da7d7e3bd8612f63785a7b0c39a244cf14b8dd9dd2f2fab939f2d7f1"},
- {file = "grpcio-1.62.2-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:2507006c8a478f19e99b6fe36a2464696b89d40d88f34e4b709abe57e1337467"},
- {file = "grpcio-1.62.2-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a90ac47a8ce934e2c8d71e317d2f9e7e6aaceb2d199de940ce2c2eb611b8c0f4"},
- {file = "grpcio-1.62.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99701979bcaaa7de8d5f60476487c5df8f27483624f1f7e300ff4669ee44d1f2"},
- {file = "grpcio-1.62.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:af7dc3f7a44f10863b1b0ecab4078f0a00f561aae1edbd01fd03ad4dcf61c9e9"},
- {file = "grpcio-1.62.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:fa63245271920786f4cb44dcada4983a3516be8f470924528cf658731864c14b"},
- {file = "grpcio-1.62.2-cp39-cp39-win32.whl", hash = "sha256:c6ad9c39704256ed91a1cffc1379d63f7d0278d6a0bad06b0330f5d30291e3a3"},
- {file = "grpcio-1.62.2-cp39-cp39-win_amd64.whl", hash = "sha256:16da954692fd61aa4941fbeda405a756cd96b97b5d95ca58a92547bba2c1624f"},
- {file = "grpcio-1.62.2.tar.gz", hash = "sha256:c77618071d96b7a8be2c10701a98537823b9c65ba256c0b9067e0594cdbd954d"},
-]
-
-[package.extras]
-protobuf = ["grpcio-tools (>=1.62.2)"]
+ {file = "grpcio-1.63.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:2e93aca840c29d4ab5db93f94ed0a0ca899e241f2e8aec6334ab3575dc46125c"},
+ {file = "grpcio-1.63.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:91b73d3f1340fefa1e1716c8c1ec9930c676d6b10a3513ab6c26004cb02d8b3f"},
+ {file = "grpcio-1.63.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:b3afbd9d6827fa6f475a4f91db55e441113f6d3eb9b7ebb8fb806e5bb6d6bd0d"},
+ {file = "grpcio-1.63.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8f3f6883ce54a7a5f47db43289a0a4c776487912de1a0e2cc83fdaec9685cc9f"},
+ {file = "grpcio-1.63.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf8dae9cc0412cb86c8de5a8f3be395c5119a370f3ce2e69c8b7d46bb9872c8d"},
+ {file = "grpcio-1.63.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:08e1559fd3b3b4468486b26b0af64a3904a8dbc78d8d936af9c1cf9636eb3e8b"},
+ {file = "grpcio-1.63.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5c039ef01516039fa39da8a8a43a95b64e288f79f42a17e6c2904a02a319b357"},
+ {file = "grpcio-1.63.0-cp310-cp310-win32.whl", hash = "sha256:ad2ac8903b2eae071055a927ef74121ed52d69468e91d9bcbd028bd0e554be6d"},
+ {file = "grpcio-1.63.0-cp310-cp310-win_amd64.whl", hash = "sha256:b2e44f59316716532a993ca2966636df6fbe7be4ab6f099de6815570ebe4383a"},
+ {file = "grpcio-1.63.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:f28f8b2db7b86c77916829d64ab21ff49a9d8289ea1564a2b2a3a8ed9ffcccd3"},
+ {file = "grpcio-1.63.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:65bf975639a1f93bee63ca60d2e4951f1b543f498d581869922910a476ead2f5"},
+ {file = "grpcio-1.63.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:b5194775fec7dc3dbd6a935102bb156cd2c35efe1685b0a46c67b927c74f0cfb"},
+ {file = "grpcio-1.63.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e4cbb2100ee46d024c45920d16e888ee5d3cf47c66e316210bc236d5bebc42b3"},
+ {file = "grpcio-1.63.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ff737cf29b5b801619f10e59b581869e32f400159e8b12d7a97e7e3bdeee6a2"},
+ {file = "grpcio-1.63.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:cd1e68776262dd44dedd7381b1a0ad09d9930ffb405f737d64f505eb7f77d6c7"},
+ {file = "grpcio-1.63.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:93f45f27f516548e23e4ec3fbab21b060416007dbe768a111fc4611464cc773f"},
+ {file = "grpcio-1.63.0-cp311-cp311-win32.whl", hash = "sha256:878b1d88d0137df60e6b09b74cdb73db123f9579232c8456f53e9abc4f62eb3c"},
+ {file = "grpcio-1.63.0-cp311-cp311-win_amd64.whl", hash = "sha256:756fed02dacd24e8f488f295a913f250b56b98fb793f41d5b2de6c44fb762434"},
+ {file = "grpcio-1.63.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:93a46794cc96c3a674cdfb59ef9ce84d46185fe9421baf2268ccb556f8f81f57"},
+ {file = "grpcio-1.63.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:a7b19dfc74d0be7032ca1eda0ed545e582ee46cd65c162f9e9fc6b26ef827dc6"},
+ {file = "grpcio-1.63.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:8064d986d3a64ba21e498b9a376cbc5d6ab2e8ab0e288d39f266f0fca169b90d"},
+ {file = "grpcio-1.63.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:219bb1848cd2c90348c79ed0a6b0ea51866bc7e72fa6e205e459fedab5770172"},
+ {file = "grpcio-1.63.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2d60cd1d58817bc5985fae6168d8b5655c4981d448d0f5b6194bbcc038090d2"},
+ {file = "grpcio-1.63.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:9e350cb096e5c67832e9b6e018cf8a0d2a53b2a958f6251615173165269a91b0"},
+ {file = "grpcio-1.63.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:56cdf96ff82e3cc90dbe8bac260352993f23e8e256e063c327b6cf9c88daf7a9"},
+ {file = "grpcio-1.63.0-cp312-cp312-win32.whl", hash = "sha256:3a6d1f9ea965e750db7b4ee6f9fdef5fdf135abe8a249e75d84b0a3e0c668a1b"},
+ {file = "grpcio-1.63.0-cp312-cp312-win_amd64.whl", hash = "sha256:d2497769895bb03efe3187fb1888fc20e98a5f18b3d14b606167dacda5789434"},
+ {file = "grpcio-1.63.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:fdf348ae69c6ff484402cfdb14e18c1b0054ac2420079d575c53a60b9b2853ae"},
+ {file = "grpcio-1.63.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:a3abfe0b0f6798dedd2e9e92e881d9acd0fdb62ae27dcbbfa7654a57e24060c0"},
+ {file = "grpcio-1.63.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:6ef0ad92873672a2a3767cb827b64741c363ebaa27e7f21659e4e31f4d750280"},
+ {file = "grpcio-1.63.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b416252ac5588d9dfb8a30a191451adbf534e9ce5f56bb02cd193f12d8845b7f"},
+ {file = "grpcio-1.63.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e3b77eaefc74d7eb861d3ffbdf91b50a1bb1639514ebe764c47773b833fa2d91"},
+ {file = "grpcio-1.63.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:b005292369d9c1f80bf70c1db1c17c6c342da7576f1c689e8eee4fb0c256af85"},
+ {file = "grpcio-1.63.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:cdcda1156dcc41e042d1e899ba1f5c2e9f3cd7625b3d6ebfa619806a4c1aadda"},
+ {file = "grpcio-1.63.0-cp38-cp38-win32.whl", hash = "sha256:01799e8649f9e94ba7db1aeb3452188048b0019dc37696b0f5ce212c87c560c3"},
+ {file = "grpcio-1.63.0-cp38-cp38-win_amd64.whl", hash = "sha256:6a1a3642d76f887aa4009d92f71eb37809abceb3b7b5a1eec9c554a246f20e3a"},
+ {file = "grpcio-1.63.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:75f701ff645858a2b16bc8c9fc68af215a8bb2d5a9b647448129de6e85d52bce"},
+ {file = "grpcio-1.63.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:cacdef0348a08e475a721967f48206a2254a1b26ee7637638d9e081761a5ba86"},
+ {file = "grpcio-1.63.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:0697563d1d84d6985e40ec5ec596ff41b52abb3fd91ec240e8cb44a63b895094"},
+ {file = "grpcio-1.63.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6426e1fb92d006e47476d42b8f240c1d916a6d4423c5258ccc5b105e43438f61"},
+ {file = "grpcio-1.63.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e48cee31bc5f5a31fb2f3b573764bd563aaa5472342860edcc7039525b53e46a"},
+ {file = "grpcio-1.63.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:50344663068041b34a992c19c600236e7abb42d6ec32567916b87b4c8b8833b3"},
+ {file = "grpcio-1.63.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:259e11932230d70ef24a21b9fb5bb947eb4703f57865a404054400ee92f42f5d"},
+ {file = "grpcio-1.63.0-cp39-cp39-win32.whl", hash = "sha256:a44624aad77bf8ca198c55af811fd28f2b3eaf0a50ec5b57b06c034416ef2d0a"},
+ {file = "grpcio-1.63.0-cp39-cp39-win_amd64.whl", hash = "sha256:166e5c460e5d7d4656ff9e63b13e1f6029b122104c1633d5f37eaea348d7356d"},
+ {file = "grpcio-1.63.0.tar.gz", hash = "sha256:f3023e14805c61bc439fb40ca545ac3d5740ce66120a678a3c6c2c55b70343d1"},
+]
+
+[package.extras]
+protobuf = ["grpcio-tools (>=1.63.0)"]
[[package]]
name = "grpcio-status"
-version = "1.62.2"
+version = "1.62.3"
description = "Status proto mapping for gRPC"
optional = false
python-versions = ">=3.6"
files = [
- {file = "grpcio-status-1.62.2.tar.gz", hash = "sha256:62e1bfcb02025a1cd73732a2d33672d3e9d0df4d21c12c51e0bbcaf09bab742a"},
- {file = "grpcio_status-1.62.2-py3-none-any.whl", hash = "sha256:206ddf0eb36bc99b033f03b2c8e95d319f0044defae9b41ae21408e7e0cda48f"},
+ {file = "grpcio-status-1.62.3.tar.gz", hash = "sha256:289bdd7b2459794a12cf95dc0cb727bd4a1742c37bd823f760236c937e53a485"},
+ {file = "grpcio_status-1.62.3-py3-none-any.whl", hash = "sha256:f9049b762ba8de6b1086789d8315846e094edac2c50beaf462338b301a8fd4b8"},
]
[package.dependencies]
googleapis-common-protos = ">=1.5.5"
-grpcio = ">=1.62.2"
+grpcio = ">=1.62.3"
protobuf = ">=4.21.6"
[[package]]
name = "grpcio-tools"
-version = "1.62.2"
+version = "1.62.3"
description = "Protobuf code generator for gRPC"
optional = false
python-versions = ">=3.7"
files = [
- {file = "grpcio-tools-1.62.2.tar.gz", hash = "sha256:5fd5e1582b678e6b941ee5f5809340be5e0724691df5299aae8226640f94e18f"},
- {file = "grpcio_tools-1.62.2-cp310-cp310-linux_armv7l.whl", hash = "sha256:1679b4903aed2dc5bd8cb22a452225b05dc8470a076f14fd703581efc0740cdb"},
- {file = "grpcio_tools-1.62.2-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:9d41e0e47dd075c075bb8f103422968a65dd0d8dc8613288f573ae91eb1053ba"},
- {file = "grpcio_tools-1.62.2-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:987e774f74296842bbffd55ea8826370f70c499e5b5f71a8cf3103838b6ee9c3"},
- {file = "grpcio_tools-1.62.2-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40cd4eeea4b25bcb6903b82930d579027d034ba944393c4751cdefd9c49e6989"},
- {file = "grpcio_tools-1.62.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b6746bc823958499a3cf8963cc1de00072962fb5e629f26d658882d3f4c35095"},
- {file = "grpcio_tools-1.62.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:2ed775e844566ce9ce089be9a81a8b928623b8ee5820f5e4d58c1a9d33dfc5ae"},
- {file = "grpcio_tools-1.62.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bdc5dd3f57b5368d5d661d5d3703bcaa38bceca59d25955dff66244dbc987271"},
- {file = "grpcio_tools-1.62.2-cp310-cp310-win32.whl", hash = "sha256:3a8d6f07e64c0c7756f4e0c4781d9d5a2b9cc9cbd28f7032a6fb8d4f847d0445"},
- {file = "grpcio_tools-1.62.2-cp310-cp310-win_amd64.whl", hash = "sha256:e33b59fb3efdddeb97ded988a871710033e8638534c826567738d3edce528752"},
- {file = "grpcio_tools-1.62.2-cp311-cp311-linux_armv7l.whl", hash = "sha256:472505d030135d73afe4143b0873efe0dcb385bd6d847553b4f3afe07679af00"},
- {file = "grpcio_tools-1.62.2-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:ec674b4440ef4311ac1245a709e87b36aca493ddc6850eebe0b278d1f2b6e7d1"},
- {file = "grpcio_tools-1.62.2-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:184b4174d4bd82089d706e8223e46c42390a6ebac191073b9772abc77308f9fa"},
- {file = "grpcio_tools-1.62.2-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c195d74fe98541178ece7a50dad2197d43991e0f77372b9a88da438be2486f12"},
- {file = "grpcio_tools-1.62.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a34d97c62e61bfe9e6cff0410fe144ac8cca2fc979ad0be46b7edf026339d161"},
- {file = "grpcio_tools-1.62.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:cbb8453ae83a1db2452b7fe0f4b78e4a8dd32be0f2b2b73591ae620d4d784d3d"},
- {file = "grpcio_tools-1.62.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4f989e5cebead3ae92c6abf6bf7b19949e1563a776aea896ac5933f143f0c45d"},
- {file = "grpcio_tools-1.62.2-cp311-cp311-win32.whl", hash = "sha256:c48fabe40b9170f4e3d7dd2c252e4f1ff395dc24e49ac15fc724b1b6f11724da"},
- {file = "grpcio_tools-1.62.2-cp311-cp311-win_amd64.whl", hash = "sha256:8c616d0ad872e3780693fce6a3ac8ef00fc0963e6d7815ce9dcfae68ba0fc287"},
- {file = "grpcio_tools-1.62.2-cp312-cp312-linux_armv7l.whl", hash = "sha256:10cc3321704ecd17c93cf68c99c35467a8a97ffaaed53207e9b2da6ae0308ee1"},
- {file = "grpcio_tools-1.62.2-cp312-cp312-macosx_10_10_universal2.whl", hash = "sha256:9be84ff6d47fd61462be7523b49d7ba01adf67ce4e1447eae37721ab32464dd8"},
- {file = "grpcio_tools-1.62.2-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:d82f681c9a9d933a9d8068e8e382977768e7779ddb8870fa0cf918d8250d1532"},
- {file = "grpcio_tools-1.62.2-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:04c607029ae3660fb1624ed273811ffe09d57d84287d37e63b5b802a35897329"},
- {file = "grpcio_tools-1.62.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72b61332f1b439c14cbd3815174a8f1d35067a02047c32decd406b3a09bb9890"},
- {file = "grpcio_tools-1.62.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8214820990d01b52845f9fbcb92d2b7384a0c321b303e3ac614c219dc7d1d3af"},
- {file = "grpcio_tools-1.62.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:462e0ab8dd7c7b70bfd6e3195eebc177549ede5cf3189814850c76f9a340d7ce"},
- {file = "grpcio_tools-1.62.2-cp312-cp312-win32.whl", hash = "sha256:fa107460c842e4c1a6266150881694fefd4f33baa544ea9489601810c2210ef8"},
- {file = "grpcio_tools-1.62.2-cp312-cp312-win_amd64.whl", hash = "sha256:759c60f24c33a181bbbc1232a6752f9b49fbb1583312a4917e2b389fea0fb0f2"},
- {file = "grpcio_tools-1.62.2-cp37-cp37m-linux_armv7l.whl", hash = "sha256:45db5da2bcfa88f2b86b57ef35daaae85c60bd6754a051d35d9449c959925b57"},
- {file = "grpcio_tools-1.62.2-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:ab84bae88597133f6ea7a2bdc57b2fda98a266fe8d8d4763652cbefd20e73ad7"},
- {file = "grpcio_tools-1.62.2-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:7a49bccae1c7d154b78e991885c3111c9ad8c8fa98e91233de425718f47c6139"},
- {file = "grpcio_tools-1.62.2-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7e439476b29d6dac363b321781a113794397afceeb97dad85349db5f1cb5e9a"},
- {file = "grpcio_tools-1.62.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ea369c4d1567d1acdf69c8ea74144f4ccad9e545df7f9a4fc64c94fa7684ba3"},
- {file = "grpcio_tools-1.62.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:4f955702dc4b530696375251319d05223b729ed24e8673c2129f7a75d2caefbb"},
- {file = "grpcio_tools-1.62.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:3708a747aa4b6b505727282ca887041174e146ae030ebcadaf4c1d346858df62"},
- {file = "grpcio_tools-1.62.2-cp37-cp37m-win_amd64.whl", hash = "sha256:2ce149ea55eadb486a7fb75a20f63ef3ac065ee6a0240ed25f3549ce7954c653"},
- {file = "grpcio_tools-1.62.2-cp38-cp38-linux_armv7l.whl", hash = "sha256:58cbb24b3fa6ae35aa9c210fcea3a51aa5fef0cd25618eb4fd94f746d5a9b703"},
- {file = "grpcio_tools-1.62.2-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:6413581e14a80e0b4532577766cf0586de4dd33766a31b3eb5374a746771c07d"},
- {file = "grpcio_tools-1.62.2-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:47117c8a7e861382470d0e22d336e5a91fdc5f851d1db44fa784b9acea190d87"},
- {file = "grpcio_tools-1.62.2-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9f1ba79a253df9e553d20319c615fa2b429684580fa042dba618d7f6649ac7e4"},
- {file = "grpcio_tools-1.62.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:04a394cf5e51ba9be412eb9f6c482b6270bd81016e033e8eb7d21b8cc28fe8b5"},
- {file = "grpcio_tools-1.62.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:3c53b221378b035ae2f1881cbc3aca42a6075a8e90e1a342c2f205eb1d1aa6a1"},
- {file = "grpcio_tools-1.62.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c384c838b34d1b67068e51b5bbe49caa6aa3633acd158f1ab16b5da8d226bc53"},
- {file = "grpcio_tools-1.62.2-cp38-cp38-win32.whl", hash = "sha256:19ea69e41c3565932aa28a202d1875ec56786aea46a2eab54a3b28e8a27f9517"},
- {file = "grpcio_tools-1.62.2-cp38-cp38-win_amd64.whl", hash = "sha256:1d768a5c07279a4c461ebf52d0cec1c6ca85c6291c71ec2703fe3c3e7e28e8c4"},
- {file = "grpcio_tools-1.62.2-cp39-cp39-linux_armv7l.whl", hash = "sha256:5b07b5874187e170edfbd7aa2ca3a54ebf3b2952487653e8c0b0d83601c33035"},
- {file = "grpcio_tools-1.62.2-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:d58389fe8be206ddfb4fa703db1e24c956856fcb9a81da62b13577b3a8f7fda7"},
- {file = "grpcio_tools-1.62.2-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:7d8b4e00c3d7237b92260fc18a561cd81f1da82e8be100db1b7d816250defc66"},
- {file = "grpcio_tools-1.62.2-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1fe08d2038f2b7c53259b5c49e0ad08c8e0ce2b548d8185993e7ef67e8592cca"},
- {file = "grpcio_tools-1.62.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:19216e1fb26dbe23d12a810517e1b3fbb8d4f98b1a3fbebeec9d93a79f092de4"},
- {file = "grpcio_tools-1.62.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:b8574469ecc4ff41d6bb95f44e0297cdb0d95bade388552a9a444db9cd7485cd"},
- {file = "grpcio_tools-1.62.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4f6f32d39283ea834a493fccf0ebe9cfddee7577bdcc27736ad4be1732a36399"},
- {file = "grpcio_tools-1.62.2-cp39-cp39-win32.whl", hash = "sha256:76eb459bdf3fb666e01883270beee18f3f11ed44488486b61cd210b4e0e17cc1"},
- {file = "grpcio_tools-1.62.2-cp39-cp39-win_amd64.whl", hash = "sha256:217c2ee6a7ce519a55958b8622e21804f6fdb774db08c322f4c9536c35fdce7c"},
-]
-
-[package.dependencies]
-grpcio = ">=1.62.2"
+ {file = "grpcio-tools-1.62.3.tar.gz", hash = "sha256:7c7136015c3d62c3eef493efabaf9e3380e3e66d24ee8e94c01cb71377f57833"},
+ {file = "grpcio_tools-1.62.3-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:2f968b049c2849540751ec2100ab05e8086c24bead769ca734fdab58698408c1"},
+ {file = "grpcio_tools-1.62.3-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:0a8c0c4724ae9c2181b7dbc9b186df46e4f62cb18dc184e46d06c0ebeccf569e"},
+ {file = "grpcio_tools-1.62.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5782883a27d3fae8c425b29a9d3dcf5f47d992848a1b76970da3b5a28d424b26"},
+ {file = "grpcio_tools-1.62.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3d812daffd0c2d2794756bd45a353f89e55dc8f91eb2fc840c51b9f6be62667"},
+ {file = "grpcio_tools-1.62.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:b47d0dda1bdb0a0ba7a9a6de88e5a1ed61f07fad613964879954961e36d49193"},
+ {file = "grpcio_tools-1.62.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ca246dffeca0498be9b4e1ee169b62e64694b0f92e6d0be2573e65522f39eea9"},
+ {file = "grpcio_tools-1.62.3-cp310-cp310-win32.whl", hash = "sha256:6a56d344b0bab30bf342a67e33d386b0b3c4e65868ffe93c341c51e1a8853ca5"},
+ {file = "grpcio_tools-1.62.3-cp310-cp310-win_amd64.whl", hash = "sha256:710fecf6a171dcbfa263a0a3e7070e0df65ba73158d4c539cec50978f11dad5d"},
+ {file = "grpcio_tools-1.62.3-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:703f46e0012af83a36082b5f30341113474ed0d91e36640da713355cd0ea5d23"},
+ {file = "grpcio_tools-1.62.3-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:7cc83023acd8bc72cf74c2edbe85b52098501d5b74d8377bfa06f3e929803492"},
+ {file = "grpcio_tools-1.62.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ff7d58a45b75df67d25f8f144936a3e44aabd91afec833ee06826bd02b7fbe7"},
+ {file = "grpcio_tools-1.62.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f2483ea232bd72d98a6dc6d7aefd97e5bc80b15cd909b9e356d6f3e326b6e43"},
+ {file = "grpcio_tools-1.62.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:962c84b4da0f3b14b3cdb10bc3837ebc5f136b67d919aea8d7bb3fd3df39528a"},
+ {file = "grpcio_tools-1.62.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8ad0473af5544f89fc5a1ece8676dd03bdf160fb3230f967e05d0f4bf89620e3"},
+ {file = "grpcio_tools-1.62.3-cp311-cp311-win32.whl", hash = "sha256:db3bc9fa39afc5e4e2767da4459df82b095ef0cab2f257707be06c44a1c2c3e5"},
+ {file = "grpcio_tools-1.62.3-cp311-cp311-win_amd64.whl", hash = "sha256:e0898d412a434e768a0c7e365acabe13ff1558b767e400936e26b5b6ed1ee51f"},
+ {file = "grpcio_tools-1.62.3-cp312-cp312-macosx_10_10_universal2.whl", hash = "sha256:d102b9b21c4e1e40af9a2ab3c6d41afba6bd29c0aa50ca013bf85c99cdc44ac5"},
+ {file = "grpcio_tools-1.62.3-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:0a52cc9444df978438b8d2332c0ca99000521895229934a59f94f37ed896b133"},
+ {file = "grpcio_tools-1.62.3-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:141d028bf5762d4a97f981c501da873589df3f7e02f4c1260e1921e565b376fa"},
+ {file = "grpcio_tools-1.62.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47a5c093ab256dec5714a7a345f8cc89315cb57c298b276fa244f37a0ba507f0"},
+ {file = "grpcio_tools-1.62.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:f6831fdec2b853c9daa3358535c55eed3694325889aa714070528cf8f92d7d6d"},
+ {file = "grpcio_tools-1.62.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e02d7c1a02e3814c94ba0cfe43d93e872c758bd8fd5c2797f894d0c49b4a1dfc"},
+ {file = "grpcio_tools-1.62.3-cp312-cp312-win32.whl", hash = "sha256:b881fd9505a84457e9f7e99362eeedd86497b659030cf57c6f0070df6d9c2b9b"},
+ {file = "grpcio_tools-1.62.3-cp312-cp312-win_amd64.whl", hash = "sha256:11c625eebefd1fd40a228fc8bae385e448c7e32a6ae134e43cf13bbc23f902b7"},
+ {file = "grpcio_tools-1.62.3-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:ec6fbded0c61afe6f84e3c2a43e6d656791d95747d6d28b73eff1af64108c434"},
+ {file = "grpcio_tools-1.62.3-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:bfda6ee8990997a9df95c5606f3096dae65f09af7ca03a1e9ca28f088caca5cf"},
+ {file = "grpcio_tools-1.62.3-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b77f9f9cee87cd798f0fe26b7024344d1b03a7cd2d2cba7035f8433b13986325"},
+ {file = "grpcio_tools-1.62.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e02d3b96f2d0e4bab9ceaa30f37d4f75571e40c6272e95364bff3125a64d184"},
+ {file = "grpcio_tools-1.62.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:1da38070738da53556a4b35ab67c1b9884a5dd48fa2f243db35dc14079ea3d0c"},
+ {file = "grpcio_tools-1.62.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ace43b26d88a58dcff16c20d23ff72b04d0a415f64d2820f4ff06b1166f50557"},
+ {file = "grpcio_tools-1.62.3-cp37-cp37m-win_amd64.whl", hash = "sha256:350a80485e302daaa95d335a931f97b693e170e02d43767ab06552c708808950"},
+ {file = "grpcio_tools-1.62.3-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:c3a1ac9d394f8e229eb28eec2e04b9a6f5433fa19c9d32f1cb6066e3c5114a1d"},
+ {file = "grpcio_tools-1.62.3-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:11f363570dea661dde99e04a51bd108a5807b5df32a6f8bdf4860e34e94a4dbf"},
+ {file = "grpcio_tools-1.62.3-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dc9ad9950119d8ae27634e68b7663cc8d340ae535a0f80d85a55e56a6973ab1f"},
+ {file = "grpcio_tools-1.62.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c5d22b252dcef11dd1e0fbbe5bbfb9b4ae048e8880d33338215e8ccbdb03edc"},
+ {file = "grpcio_tools-1.62.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:27cd9ef5c5d68d5ed104b6dcb96fe9c66b82050e546c9e255716903c3d8f0373"},
+ {file = "grpcio_tools-1.62.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f4b1615adf67bd8bb71f3464146a6f9949972d06d21a4f5e87e73f6464d97f57"},
+ {file = "grpcio_tools-1.62.3-cp38-cp38-win32.whl", hash = "sha256:e18e15287c31baf574fcdf8251fb7f997d64e96c6ecf467906e576da0a079af6"},
+ {file = "grpcio_tools-1.62.3-cp38-cp38-win_amd64.whl", hash = "sha256:6c3064610826f50bd69410c63101954676edc703e03f9e8f978a135f1aaf97c1"},
+ {file = "grpcio_tools-1.62.3-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:8e62cc7164b0b7c5128e637e394eb2ef3db0e61fc798e80c301de3b2379203ed"},
+ {file = "grpcio_tools-1.62.3-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:c8ad5cce554e2fcaf8842dee5d9462583b601a3a78f8b76a153c38c963f58c10"},
+ {file = "grpcio_tools-1.62.3-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ec279dcf3518201fc592c65002754f58a6b542798cd7f3ecd4af086422f33f29"},
+ {file = "grpcio_tools-1.62.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c989246c2aebc13253f08be32538a4039a64e12d9c18f6d662d7aee641dc8b5"},
+ {file = "grpcio_tools-1.62.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ca4f5eeadbb57cf03317d6a2857823239a63a59cc935f5bd6cf6e8b7af7a7ecc"},
+ {file = "grpcio_tools-1.62.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0cb3a3436ac119cbd37a7d3331d9bdf85dad21a6ac233a3411dff716dcbf401e"},
+ {file = "grpcio_tools-1.62.3-cp39-cp39-win32.whl", hash = "sha256:3eae6ea76d62fcac091e1f15c2dcedf1dc3f114f8df1a972a8a0745e89f4cf61"},
+ {file = "grpcio_tools-1.62.3-cp39-cp39-win_amd64.whl", hash = "sha256:eec73a005443061f4759b71a056f745e3b000dc0dc125c9f20560232dfbcbd14"},
+]
+
+[package.dependencies]
+grpcio = ">=1.62.3"
protobuf = ">=4.21.6,<5.0dev"
setuptools = "*"
@@ -3798,22 +3792,22 @@ files = [
[[package]]
name = "importlib-metadata"
-version = "7.1.0"
+version = "8.0.0"
description = "Read metadata from Python packages"
optional = false
python-versions = ">=3.8"
files = [
- {file = "importlib_metadata-7.1.0-py3-none-any.whl", hash = "sha256:30962b96c0c223483ed6cc7280e7f0199feb01a0e40cfae4d4450fc6fab1f570"},
- {file = "importlib_metadata-7.1.0.tar.gz", hash = "sha256:b78938b926ee8d5f020fc4772d487045805a55ddbad2ecf21c6d60938dc7fcd2"},
+ {file = "importlib_metadata-8.0.0-py3-none-any.whl", hash = "sha256:15584cf2b1bf449d98ff8a6ff1abef57bf20f3ac6454f431736cd3e660921b2f"},
+ {file = "importlib_metadata-8.0.0.tar.gz", hash = "sha256:188bd24e4c346d3f0a933f275c2fec67050326a856b9a359881d7c2a697e8812"},
]
[package.dependencies]
zipp = ">=0.5"
[package.extras]
-docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
+doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
perf = ["ipython"]
-testing = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-perf (>=0.9.2)", "pytest-ruff (>=0.2.1)"]
+test = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-perf (>=0.9.2)", "pytest-ruff (>=0.2.1)"]
[[package]]
name = "importlib-resources"
@@ -4069,28 +4063,28 @@ files = [
[[package]]
name = "kombu"
-version = "5.3.7"
+version = "5.4.0"
description = "Messaging library for Python."
optional = false
python-versions = ">=3.8"
files = [
- {file = "kombu-5.3.7-py3-none-any.whl", hash = "sha256:5634c511926309c7f9789f1433e9ed402616b56836ef9878f01bd59267b4c7a9"},
- {file = "kombu-5.3.7.tar.gz", hash = "sha256:011c4cd9a355c14a1de8d35d257314a1d2456d52b7140388561acac3cf1a97bf"},
+ {file = "kombu-5.4.0-py3-none-any.whl", hash = "sha256:c8dd99820467610b4febbc7a9e8a0d3d7da2d35116b67184418b51cc520ea6b6"},
+ {file = "kombu-5.4.0.tar.gz", hash = "sha256:ad200a8dbdaaa2bbc5f26d2ee7d707d9a1fded353a0f4bd751ce8c7d9f449c60"},
]
[package.dependencies]
amqp = ">=5.1.1,<6.0.0"
-vine = "*"
+vine = "5.1.0"
[package.extras]
azureservicebus = ["azure-servicebus (>=7.10.0)"]
azurestoragequeues = ["azure-identity (>=1.12.0)", "azure-storage-queue (>=12.6.0)"]
confluentkafka = ["confluent-kafka (>=2.2.0)"]
-consul = ["python-consul2"]
+consul = ["python-consul2 (==0.1.5)"]
librabbitmq = ["librabbitmq (>=2.0.0)"]
mongodb = ["pymongo (>=4.1.1)"]
-msgpack = ["msgpack"]
-pyro = ["pyro4"]
+msgpack = ["msgpack (==1.0.8)"]
+pyro = ["pyro4 (==4.82)"]
qpid = ["qpid-python (>=0.26)", "qpid-tools (>=0.26)"]
redis = ["redis (>=4.5.2,!=4.5.5,!=5.0.2)"]
slmq = ["softlayer-messaging (>=1.0.3)"]
@@ -4141,13 +4135,13 @@ six = "*"
[[package]]
name = "langfuse"
-version = "2.39.3"
+version = "2.42.1"
description = "A client library for accessing langfuse"
optional = false
python-versions = "<4.0,>=3.8.1"
files = [
- {file = "langfuse-2.39.3-py3-none-any.whl", hash = "sha256:24b12cbb23f866b22706c1ea9631781f99fe37b0b15889d241198c4d1c07516b"},
- {file = "langfuse-2.39.3.tar.gz", hash = "sha256:4d2df8f9344572370703db103ddf97176df518699593254e6d6c2b8ca3bf2f12"},
+ {file = "langfuse-2.42.1-py3-none-any.whl", hash = "sha256:8895d9645aea91815db51565f90e110a76d5e157a7b12eaf1cd6959e7aaa2263"},
+ {file = "langfuse-2.42.1.tar.gz", hash = "sha256:f89faf1c14308d488c90f8b7d0368fff3d259f80ffe34d169b9cfc3f0dbfab82"},
]
[package.dependencies]
@@ -4166,13 +4160,13 @@ openai = ["openai (>=0.27.8)"]
[[package]]
name = "langsmith"
-version = "0.1.93"
+version = "0.1.98"
description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform."
optional = false
python-versions = "<4.0,>=3.8.1"
files = [
- {file = "langsmith-0.1.93-py3-none-any.whl", hash = "sha256:811210b9d5f108f36431bd7b997eb9476a9ecf5a2abd7ddbb606c1cdcf0f43ce"},
- {file = "langsmith-0.1.93.tar.gz", hash = "sha256:285b6ad3a54f50fa8eb97b5f600acc57d0e37e139dd8cf2111a117d0435ba9b4"},
+ {file = "langsmith-0.1.98-py3-none-any.whl", hash = "sha256:f79e8a128652bbcee4606d10acb6236973b5cd7dde76e3741186d3b97b5698e9"},
+ {file = "langsmith-0.1.98.tar.gz", hash = "sha256:e07678219a0502e8f26d35294e72127a39d25e32fafd091af5a7bb661e9a6bd1"},
]
[package.dependencies]
@@ -4215,96 +4209,161 @@ files = [
[[package]]
name = "lxml"
-version = "5.1.0"
+version = "5.2.2"
description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API."
optional = false
python-versions = ">=3.6"
files = [
- {file = "lxml-5.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:704f5572ff473a5f897745abebc6df40f22d4133c1e0a1f124e4f2bd3330ff7e"},
- {file = "lxml-5.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9d3c0f8567ffe7502d969c2c1b809892dc793b5d0665f602aad19895f8d508da"},
- {file = "lxml-5.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5fcfbebdb0c5d8d18b84118842f31965d59ee3e66996ac842e21f957eb76138c"},
- {file = "lxml-5.1.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2f37c6d7106a9d6f0708d4e164b707037b7380fcd0b04c5bd9cae1fb46a856fb"},
- {file = "lxml-5.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2befa20a13f1a75c751f47e00929fb3433d67eb9923c2c0b364de449121f447c"},
- {file = "lxml-5.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22b7ee4c35f374e2c20337a95502057964d7e35b996b1c667b5c65c567d2252a"},
- {file = "lxml-5.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:bf8443781533b8d37b295016a4b53c1494fa9a03573c09ca5104550c138d5c05"},
- {file = "lxml-5.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:82bddf0e72cb2af3cbba7cec1d2fd11fda0de6be8f4492223d4a268713ef2147"},
- {file = "lxml-5.1.0-cp310-cp310-win32.whl", hash = "sha256:b66aa6357b265670bb574f050ffceefb98549c721cf28351b748be1ef9577d93"},
- {file = "lxml-5.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:4946e7f59b7b6a9e27bef34422f645e9a368cb2be11bf1ef3cafc39a1f6ba68d"},
- {file = "lxml-5.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:14deca1460b4b0f6b01f1ddc9557704e8b365f55c63070463f6c18619ebf964f"},
- {file = "lxml-5.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ed8c3d2cd329bf779b7ed38db176738f3f8be637bb395ce9629fc76f78afe3d4"},
- {file = "lxml-5.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:436a943c2900bb98123b06437cdd30580a61340fbdb7b28aaf345a459c19046a"},
- {file = "lxml-5.1.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:acb6b2f96f60f70e7f34efe0c3ea34ca63f19ca63ce90019c6cbca6b676e81fa"},
- {file = "lxml-5.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:af8920ce4a55ff41167ddbc20077f5698c2e710ad3353d32a07d3264f3a2021e"},
- {file = "lxml-5.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7cfced4a069003d8913408e10ca8ed092c49a7f6cefee9bb74b6b3e860683b45"},
- {file = "lxml-5.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:9e5ac3437746189a9b4121db2a7b86056ac8786b12e88838696899328fc44bb2"},
- {file = "lxml-5.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f4c9bda132ad108b387c33fabfea47866af87f4ea6ffb79418004f0521e63204"},
- {file = "lxml-5.1.0-cp311-cp311-win32.whl", hash = "sha256:bc64d1b1dab08f679fb89c368f4c05693f58a9faf744c4d390d7ed1d8223869b"},
- {file = "lxml-5.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:a5ab722ae5a873d8dcee1f5f45ddd93c34210aed44ff2dc643b5025981908cda"},
- {file = "lxml-5.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:9aa543980ab1fbf1720969af1d99095a548ea42e00361e727c58a40832439114"},
- {file = "lxml-5.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6f11b77ec0979f7e4dc5ae081325a2946f1fe424148d3945f943ceaede98adb8"},
- {file = "lxml-5.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a36c506e5f8aeb40680491d39ed94670487ce6614b9d27cabe45d94cd5d63e1e"},
- {file = "lxml-5.1.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f643ffd2669ffd4b5a3e9b41c909b72b2a1d5e4915da90a77e119b8d48ce867a"},
- {file = "lxml-5.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:16dd953fb719f0ffc5bc067428fc9e88f599e15723a85618c45847c96f11f431"},
- {file = "lxml-5.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:16018f7099245157564d7148165132c70adb272fb5a17c048ba70d9cc542a1a1"},
- {file = "lxml-5.1.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:82cd34f1081ae4ea2ede3d52f71b7be313756e99b4b5f829f89b12da552d3aa3"},
- {file = "lxml-5.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:19a1bc898ae9f06bccb7c3e1dfd73897ecbbd2c96afe9095a6026016e5ca97b8"},
- {file = "lxml-5.1.0-cp312-cp312-win32.whl", hash = "sha256:13521a321a25c641b9ea127ef478b580b5ec82aa2e9fc076c86169d161798b01"},
- {file = "lxml-5.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:1ad17c20e3666c035db502c78b86e58ff6b5991906e55bdbef94977700c72623"},
- {file = "lxml-5.1.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:24ef5a4631c0b6cceaf2dbca21687e29725b7c4e171f33a8f8ce23c12558ded1"},
- {file = "lxml-5.1.0-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8d2900b7f5318bc7ad8631d3d40190b95ef2aa8cc59473b73b294e4a55e9f30f"},
- {file = "lxml-5.1.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:601f4a75797d7a770daed8b42b97cd1bb1ba18bd51a9382077a6a247a12aa38d"},
- {file = "lxml-5.1.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4b68c961b5cc402cbd99cca5eb2547e46ce77260eb705f4d117fd9c3f932b95"},
- {file = "lxml-5.1.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:afd825e30f8d1f521713a5669b63657bcfe5980a916c95855060048b88e1adb7"},
- {file = "lxml-5.1.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:262bc5f512a66b527d026518507e78c2f9c2bd9eb5c8aeeb9f0eb43fcb69dc67"},
- {file = "lxml-5.1.0-cp36-cp36m-win32.whl", hash = "sha256:e856c1c7255c739434489ec9c8aa9cdf5179785d10ff20add308b5d673bed5cd"},
- {file = "lxml-5.1.0-cp36-cp36m-win_amd64.whl", hash = "sha256:c7257171bb8d4432fe9d6fdde4d55fdbe663a63636a17f7f9aaba9bcb3153ad7"},
- {file = "lxml-5.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b9e240ae0ba96477682aa87899d94ddec1cc7926f9df29b1dd57b39e797d5ab5"},
- {file = "lxml-5.1.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a96f02ba1bcd330807fc060ed91d1f7a20853da6dd449e5da4b09bfcc08fdcf5"},
- {file = "lxml-5.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e3898ae2b58eeafedfe99e542a17859017d72d7f6a63de0f04f99c2cb125936"},
- {file = "lxml-5.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61c5a7edbd7c695e54fca029ceb351fc45cd8860119a0f83e48be44e1c464862"},
- {file = "lxml-5.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:3aeca824b38ca78d9ee2ab82bd9883083d0492d9d17df065ba3b94e88e4d7ee6"},
- {file = "lxml-5.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8f52fe6859b9db71ee609b0c0a70fea5f1e71c3462ecf144ca800d3f434f0764"},
- {file = "lxml-5.1.0-cp37-cp37m-win32.whl", hash = "sha256:d42e3a3fc18acc88b838efded0e6ec3edf3e328a58c68fbd36a7263a874906c8"},
- {file = "lxml-5.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:eac68f96539b32fce2c9b47eb7c25bb2582bdaf1bbb360d25f564ee9e04c542b"},
- {file = "lxml-5.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ae15347a88cf8af0949a9872b57a320d2605ae069bcdf047677318bc0bba45b1"},
- {file = "lxml-5.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c26aab6ea9c54d3bed716b8851c8bfc40cb249b8e9880e250d1eddde9f709bf5"},
- {file = "lxml-5.1.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:342e95bddec3a698ac24378d61996b3ee5ba9acfeb253986002ac53c9a5f6f84"},
- {file = "lxml-5.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:725e171e0b99a66ec8605ac77fa12239dbe061482ac854d25720e2294652eeaa"},
- {file = "lxml-5.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d184e0d5c918cff04cdde9dbdf9600e960161d773666958c9d7b565ccc60c45"},
- {file = "lxml-5.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:98f3f020a2b736566c707c8e034945c02aa94e124c24f77ca097c446f81b01f1"},
- {file = "lxml-5.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6d48fc57e7c1e3df57be5ae8614bab6d4e7b60f65c5457915c26892c41afc59e"},
- {file = "lxml-5.1.0-cp38-cp38-win32.whl", hash = "sha256:7ec465e6549ed97e9f1e5ed51c657c9ede767bc1c11552f7f4d022c4df4a977a"},
- {file = "lxml-5.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:b21b4031b53d25b0858d4e124f2f9131ffc1530431c6d1321805c90da78388d1"},
- {file = "lxml-5.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:52427a7eadc98f9e62cb1368a5079ae826f94f05755d2d567d93ee1bc3ceb354"},
- {file = "lxml-5.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6a2a2c724d97c1eb8cf966b16ca2915566a4904b9aad2ed9a09c748ffe14f969"},
- {file = "lxml-5.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:843b9c835580d52828d8f69ea4302537337a21e6b4f1ec711a52241ba4a824f3"},
- {file = "lxml-5.1.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9b99f564659cfa704a2dd82d0684207b1aadf7d02d33e54845f9fc78e06b7581"},
- {file = "lxml-5.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f8b0c78e7aac24979ef09b7f50da871c2de2def043d468c4b41f512d831e912"},
- {file = "lxml-5.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9bcf86dfc8ff3e992fed847c077bd875d9e0ba2fa25d859c3a0f0f76f07f0c8d"},
- {file = "lxml-5.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:49a9b4af45e8b925e1cd6f3b15bbba2c81e7dba6dce170c677c9cda547411e14"},
- {file = "lxml-5.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:280f3edf15c2a967d923bcfb1f8f15337ad36f93525828b40a0f9d6c2ad24890"},
- {file = "lxml-5.1.0-cp39-cp39-win32.whl", hash = "sha256:ed7326563024b6e91fef6b6c7a1a2ff0a71b97793ac33dbbcf38f6005e51ff6e"},
- {file = "lxml-5.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:8d7b4beebb178e9183138f552238f7e6613162a42164233e2bda00cb3afac58f"},
- {file = "lxml-5.1.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9bd0ae7cc2b85320abd5e0abad5ccee5564ed5f0cc90245d2f9a8ef330a8deae"},
- {file = "lxml-5.1.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8c1d679df4361408b628f42b26a5d62bd3e9ba7f0c0e7969f925021554755aa"},
- {file = "lxml-5.1.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:2ad3a8ce9e8a767131061a22cd28fdffa3cd2dc193f399ff7b81777f3520e372"},
- {file = "lxml-5.1.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:304128394c9c22b6569eba2a6d98392b56fbdfbad58f83ea702530be80d0f9df"},
- {file = "lxml-5.1.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d74fcaf87132ffc0447b3c685a9f862ffb5b43e70ea6beec2fb8057d5d2a1fea"},
- {file = "lxml-5.1.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:8cf5877f7ed384dabfdcc37922c3191bf27e55b498fecece9fd5c2c7aaa34c33"},
- {file = "lxml-5.1.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:877efb968c3d7eb2dad540b6cabf2f1d3c0fbf4b2d309a3c141f79c7e0061324"},
- {file = "lxml-5.1.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f14a4fb1c1c402a22e6a341a24c1341b4a3def81b41cd354386dcb795f83897"},
- {file = "lxml-5.1.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:25663d6e99659544ee8fe1b89b1a8c0aaa5e34b103fab124b17fa958c4a324a6"},
- {file = "lxml-5.1.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:8b9f19df998761babaa7f09e6bc169294eefafd6149aaa272081cbddc7ba4ca3"},
- {file = "lxml-5.1.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e53d7e6a98b64fe54775d23a7c669763451340c3d44ad5e3a3b48a1efbdc96f"},
- {file = "lxml-5.1.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:c3cd1fc1dc7c376c54440aeaaa0dcc803d2126732ff5c6b68ccd619f2e64be4f"},
- {file = "lxml-5.1.0.tar.gz", hash = "sha256:3eea6ed6e6c918e468e693c41ef07f3c3acc310b70ddd9cc72d9ef84bc9564ca"},
+ {file = "lxml-5.2.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:364d03207f3e603922d0d3932ef363d55bbf48e3647395765f9bfcbdf6d23632"},
+ {file = "lxml-5.2.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:50127c186f191b8917ea2fb8b206fbebe87fd414a6084d15568c27d0a21d60db"},
+ {file = "lxml-5.2.2-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:74e4f025ef3db1c6da4460dd27c118d8cd136d0391da4e387a15e48e5c975147"},
+ {file = "lxml-5.2.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:981a06a3076997adf7c743dcd0d7a0415582661e2517c7d961493572e909aa1d"},
+ {file = "lxml-5.2.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aef5474d913d3b05e613906ba4090433c515e13ea49c837aca18bde190853dff"},
+ {file = "lxml-5.2.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1e275ea572389e41e8b039ac076a46cb87ee6b8542df3fff26f5baab43713bca"},
+ {file = "lxml-5.2.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5b65529bb2f21ac7861a0e94fdbf5dc0daab41497d18223b46ee8515e5ad297"},
+ {file = "lxml-5.2.2-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:bcc98f911f10278d1daf14b87d65325851a1d29153caaf146877ec37031d5f36"},
+ {file = "lxml-5.2.2-cp310-cp310-manylinux_2_28_ppc64le.whl", hash = "sha256:b47633251727c8fe279f34025844b3b3a3e40cd1b198356d003aa146258d13a2"},
+ {file = "lxml-5.2.2-cp310-cp310-manylinux_2_28_s390x.whl", hash = "sha256:fbc9d316552f9ef7bba39f4edfad4a734d3d6f93341232a9dddadec4f15d425f"},
+ {file = "lxml-5.2.2-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:13e69be35391ce72712184f69000cda04fc89689429179bc4c0ae5f0b7a8c21b"},
+ {file = "lxml-5.2.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3b6a30a9ab040b3f545b697cb3adbf3696c05a3a68aad172e3fd7ca73ab3c835"},
+ {file = "lxml-5.2.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:a233bb68625a85126ac9f1fc66d24337d6e8a0f9207b688eec2e7c880f012ec0"},
+ {file = "lxml-5.2.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:dfa7c241073d8f2b8e8dbc7803c434f57dbb83ae2a3d7892dd068d99e96efe2c"},
+ {file = "lxml-5.2.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1a7aca7964ac4bb07680d5c9d63b9d7028cace3e2d43175cb50bba8c5ad33316"},
+ {file = "lxml-5.2.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ae4073a60ab98529ab8a72ebf429f2a8cc612619a8c04e08bed27450d52103c0"},
+ {file = "lxml-5.2.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:ffb2be176fed4457e445fe540617f0252a72a8bc56208fd65a690fdb1f57660b"},
+ {file = "lxml-5.2.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:e290d79a4107d7d794634ce3e985b9ae4f920380a813717adf61804904dc4393"},
+ {file = "lxml-5.2.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:96e85aa09274955bb6bd483eaf5b12abadade01010478154b0ec70284c1b1526"},
+ {file = "lxml-5.2.2-cp310-cp310-win32.whl", hash = "sha256:f956196ef61369f1685d14dad80611488d8dc1ef00be57c0c5a03064005b0f30"},
+ {file = "lxml-5.2.2-cp310-cp310-win_amd64.whl", hash = "sha256:875a3f90d7eb5c5d77e529080d95140eacb3c6d13ad5b616ee8095447b1d22e7"},
+ {file = "lxml-5.2.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:45f9494613160d0405682f9eee781c7e6d1bf45f819654eb249f8f46a2c22545"},
+ {file = "lxml-5.2.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b0b3f2df149efb242cee2ffdeb6674b7f30d23c9a7af26595099afaf46ef4e88"},
+ {file = "lxml-5.2.2-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d28cb356f119a437cc58a13f8135ab8a4c8ece18159eb9194b0d269ec4e28083"},
+ {file = "lxml-5.2.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:657a972f46bbefdbba2d4f14413c0d079f9ae243bd68193cb5061b9732fa54c1"},
+ {file = "lxml-5.2.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b74b9ea10063efb77a965a8d5f4182806fbf59ed068b3c3fd6f30d2ac7bee734"},
+ {file = "lxml-5.2.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:07542787f86112d46d07d4f3c4e7c760282011b354d012dc4141cc12a68cef5f"},
+ {file = "lxml-5.2.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:303f540ad2dddd35b92415b74b900c749ec2010e703ab3bfd6660979d01fd4ed"},
+ {file = "lxml-5.2.2-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:2eb2227ce1ff998faf0cd7fe85bbf086aa41dfc5af3b1d80867ecfe75fb68df3"},
+ {file = "lxml-5.2.2-cp311-cp311-manylinux_2_28_ppc64le.whl", hash = "sha256:1d8a701774dfc42a2f0b8ccdfe7dbc140500d1049e0632a611985d943fcf12df"},
+ {file = "lxml-5.2.2-cp311-cp311-manylinux_2_28_s390x.whl", hash = "sha256:56793b7a1a091a7c286b5f4aa1fe4ae5d1446fe742d00cdf2ffb1077865db10d"},
+ {file = "lxml-5.2.2-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:eb00b549b13bd6d884c863554566095bf6fa9c3cecb2e7b399c4bc7904cb33b5"},
+ {file = "lxml-5.2.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1a2569a1f15ae6c8c64108a2cd2b4a858fc1e13d25846be0666fc144715e32ab"},
+ {file = "lxml-5.2.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:8cf85a6e40ff1f37fe0f25719aadf443686b1ac7652593dc53c7ef9b8492b115"},
+ {file = "lxml-5.2.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:d237ba6664b8e60fd90b8549a149a74fcc675272e0e95539a00522e4ca688b04"},
+ {file = "lxml-5.2.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0b3f5016e00ae7630a4b83d0868fca1e3d494c78a75b1c7252606a3a1c5fc2ad"},
+ {file = "lxml-5.2.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:23441e2b5339bc54dc949e9e675fa35efe858108404ef9aa92f0456929ef6fe8"},
+ {file = "lxml-5.2.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:2fb0ba3e8566548d6c8e7dd82a8229ff47bd8fb8c2da237607ac8e5a1b8312e5"},
+ {file = "lxml-5.2.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:79d1fb9252e7e2cfe4de6e9a6610c7cbb99b9708e2c3e29057f487de5a9eaefa"},
+ {file = "lxml-5.2.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6dcc3d17eac1df7859ae01202e9bb11ffa8c98949dcbeb1069c8b9a75917e01b"},
+ {file = "lxml-5.2.2-cp311-cp311-win32.whl", hash = "sha256:4c30a2f83677876465f44c018830f608fa3c6a8a466eb223535035fbc16f3438"},
+ {file = "lxml-5.2.2-cp311-cp311-win_amd64.whl", hash = "sha256:49095a38eb333aaf44c06052fd2ec3b8f23e19747ca7ec6f6c954ffea6dbf7be"},
+ {file = "lxml-5.2.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:7429e7faa1a60cad26ae4227f4dd0459efde239e494c7312624ce228e04f6391"},
+ {file = "lxml-5.2.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:50ccb5d355961c0f12f6cf24b7187dbabd5433f29e15147a67995474f27d1776"},
+ {file = "lxml-5.2.2-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dc911208b18842a3a57266d8e51fc3cfaccee90a5351b92079beed912a7914c2"},
+ {file = "lxml-5.2.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:33ce9e786753743159799fdf8e92a5da351158c4bfb6f2db0bf31e7892a1feb5"},
+ {file = "lxml-5.2.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ec87c44f619380878bd49ca109669c9f221d9ae6883a5bcb3616785fa8f94c97"},
+ {file = "lxml-5.2.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08ea0f606808354eb8f2dfaac095963cb25d9d28e27edcc375d7b30ab01abbf6"},
+ {file = "lxml-5.2.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75a9632f1d4f698b2e6e2e1ada40e71f369b15d69baddb8968dcc8e683839b18"},
+ {file = "lxml-5.2.2-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:74da9f97daec6928567b48c90ea2c82a106b2d500f397eeb8941e47d30b1ca85"},
+ {file = "lxml-5.2.2-cp312-cp312-manylinux_2_28_ppc64le.whl", hash = "sha256:0969e92af09c5687d769731e3f39ed62427cc72176cebb54b7a9d52cc4fa3b73"},
+ {file = "lxml-5.2.2-cp312-cp312-manylinux_2_28_s390x.whl", hash = "sha256:9164361769b6ca7769079f4d426a41df6164879f7f3568be9086e15baca61466"},
+ {file = "lxml-5.2.2-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:d26a618ae1766279f2660aca0081b2220aca6bd1aa06b2cf73f07383faf48927"},
+ {file = "lxml-5.2.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ab67ed772c584b7ef2379797bf14b82df9aa5f7438c5b9a09624dd834c1c1aaf"},
+ {file = "lxml-5.2.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:3d1e35572a56941b32c239774d7e9ad724074d37f90c7a7d499ab98761bd80cf"},
+ {file = "lxml-5.2.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:8268cbcd48c5375f46e000adb1390572c98879eb4f77910c6053d25cc3ac2c67"},
+ {file = "lxml-5.2.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e282aedd63c639c07c3857097fc0e236f984ceb4089a8b284da1c526491e3f3d"},
+ {file = "lxml-5.2.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6dfdc2bfe69e9adf0df4915949c22a25b39d175d599bf98e7ddf620a13678585"},
+ {file = "lxml-5.2.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:4aefd911793b5d2d7a921233a54c90329bf3d4a6817dc465f12ffdfe4fc7b8fe"},
+ {file = "lxml-5.2.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:8b8df03a9e995b6211dafa63b32f9d405881518ff1ddd775db4e7b98fb545e1c"},
+ {file = "lxml-5.2.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:f11ae142f3a322d44513de1018b50f474f8f736bc3cd91d969f464b5bfef8836"},
+ {file = "lxml-5.2.2-cp312-cp312-win32.whl", hash = "sha256:16a8326e51fcdffc886294c1e70b11ddccec836516a343f9ed0f82aac043c24a"},
+ {file = "lxml-5.2.2-cp312-cp312-win_amd64.whl", hash = "sha256:bbc4b80af581e18568ff07f6395c02114d05f4865c2812a1f02f2eaecf0bfd48"},
+ {file = "lxml-5.2.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:e3d9d13603410b72787579769469af730c38f2f25505573a5888a94b62b920f8"},
+ {file = "lxml-5.2.2-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:38b67afb0a06b8575948641c1d6d68e41b83a3abeae2ca9eed2ac59892b36706"},
+ {file = "lxml-5.2.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c689d0d5381f56de7bd6966a4541bff6e08bf8d3871bbd89a0c6ab18aa699573"},
+ {file = "lxml-5.2.2-cp36-cp36m-manylinux_2_28_x86_64.whl", hash = "sha256:cf2a978c795b54c539f47964ec05e35c05bd045db5ca1e8366988c7f2fe6b3ce"},
+ {file = "lxml-5.2.2-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:739e36ef7412b2bd940f75b278749106e6d025e40027c0b94a17ef7968d55d56"},
+ {file = "lxml-5.2.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:d8bbcd21769594dbba9c37d3c819e2d5847656ca99c747ddb31ac1701d0c0ed9"},
+ {file = "lxml-5.2.2-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:2304d3c93f2258ccf2cf7a6ba8c761d76ef84948d87bf9664e14d203da2cd264"},
+ {file = "lxml-5.2.2-cp36-cp36m-win32.whl", hash = "sha256:02437fb7308386867c8b7b0e5bc4cd4b04548b1c5d089ffb8e7b31009b961dc3"},
+ {file = "lxml-5.2.2-cp36-cp36m-win_amd64.whl", hash = "sha256:edcfa83e03370032a489430215c1e7783128808fd3e2e0a3225deee278585196"},
+ {file = "lxml-5.2.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:28bf95177400066596cdbcfc933312493799382879da504633d16cf60bba735b"},
+ {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3a745cc98d504d5bd2c19b10c79c61c7c3df9222629f1b6210c0368177589fb8"},
+ {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b590b39ef90c6b22ec0be925b211298e810b4856909c8ca60d27ffbca6c12e6"},
+ {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b336b0416828022bfd5a2e3083e7f5ba54b96242159f83c7e3eebaec752f1716"},
+ {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:c2faf60c583af0d135e853c86ac2735ce178f0e338a3c7f9ae8f622fd2eb788c"},
+ {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:4bc6cb140a7a0ad1f7bc37e018d0ed690b7b6520ade518285dc3171f7a117905"},
+ {file = "lxml-5.2.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7ff762670cada8e05b32bf1e4dc50b140790909caa8303cfddc4d702b71ea184"},
+ {file = "lxml-5.2.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:57f0a0bbc9868e10ebe874e9f129d2917750adf008fe7b9c1598c0fbbfdde6a6"},
+ {file = "lxml-5.2.2-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:a6d2092797b388342c1bc932077ad232f914351932353e2e8706851c870bca1f"},
+ {file = "lxml-5.2.2-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:60499fe961b21264e17a471ec296dcbf4365fbea611bf9e303ab69db7159ce61"},
+ {file = "lxml-5.2.2-cp37-cp37m-win32.whl", hash = "sha256:d9b342c76003c6b9336a80efcc766748a333573abf9350f4094ee46b006ec18f"},
+ {file = "lxml-5.2.2-cp37-cp37m-win_amd64.whl", hash = "sha256:b16db2770517b8799c79aa80f4053cd6f8b716f21f8aca962725a9565ce3ee40"},
+ {file = "lxml-5.2.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7ed07b3062b055d7a7f9d6557a251cc655eed0b3152b76de619516621c56f5d3"},
+ {file = "lxml-5.2.2-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f60fdd125d85bf9c279ffb8e94c78c51b3b6a37711464e1f5f31078b45002421"},
+ {file = "lxml-5.2.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8a7e24cb69ee5f32e003f50e016d5fde438010c1022c96738b04fc2423e61706"},
+ {file = "lxml-5.2.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23cfafd56887eaed93d07bc4547abd5e09d837a002b791e9767765492a75883f"},
+ {file = "lxml-5.2.2-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:19b4e485cd07b7d83e3fe3b72132e7df70bfac22b14fe4bf7a23822c3a35bff5"},
+ {file = "lxml-5.2.2-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:7ce7ad8abebe737ad6143d9d3bf94b88b93365ea30a5b81f6877ec9c0dee0a48"},
+ {file = "lxml-5.2.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:e49b052b768bb74f58c7dda4e0bdf7b79d43a9204ca584ffe1fb48a6f3c84c66"},
+ {file = "lxml-5.2.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d14a0d029a4e176795cef99c056d58067c06195e0c7e2dbb293bf95c08f772a3"},
+ {file = "lxml-5.2.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:be49ad33819d7dcc28a309b86d4ed98e1a65f3075c6acd3cd4fe32103235222b"},
+ {file = "lxml-5.2.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:a6d17e0370d2516d5bb9062c7b4cb731cff921fc875644c3d751ad857ba9c5b1"},
+ {file = "lxml-5.2.2-cp38-cp38-win32.whl", hash = "sha256:5b8c041b6265e08eac8a724b74b655404070b636a8dd6d7a13c3adc07882ef30"},
+ {file = "lxml-5.2.2-cp38-cp38-win_amd64.whl", hash = "sha256:f61efaf4bed1cc0860e567d2ecb2363974d414f7f1f124b1df368bbf183453a6"},
+ {file = "lxml-5.2.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:fb91819461b1b56d06fa4bcf86617fac795f6a99d12239fb0c68dbeba41a0a30"},
+ {file = "lxml-5.2.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d4ed0c7cbecde7194cd3228c044e86bf73e30a23505af852857c09c24e77ec5d"},
+ {file = "lxml-5.2.2-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54401c77a63cc7d6dc4b4e173bb484f28a5607f3df71484709fe037c92d4f0ed"},
+ {file = "lxml-5.2.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:625e3ef310e7fa3a761d48ca7ea1f9d8718a32b1542e727d584d82f4453d5eeb"},
+ {file = "lxml-5.2.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:519895c99c815a1a24a926d5b60627ce5ea48e9f639a5cd328bda0515ea0f10c"},
+ {file = "lxml-5.2.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c7079d5eb1c1315a858bbf180000757db8ad904a89476653232db835c3114001"},
+ {file = "lxml-5.2.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:343ab62e9ca78094f2306aefed67dcfad61c4683f87eee48ff2fd74902447726"},
+ {file = "lxml-5.2.2-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:cd9e78285da6c9ba2d5c769628f43ef66d96ac3085e59b10ad4f3707980710d3"},
+ {file = "lxml-5.2.2-cp39-cp39-manylinux_2_28_ppc64le.whl", hash = "sha256:546cf886f6242dff9ec206331209db9c8e1643ae642dea5fdbecae2453cb50fd"},
+ {file = "lxml-5.2.2-cp39-cp39-manylinux_2_28_s390x.whl", hash = "sha256:02f6a8eb6512fdc2fd4ca10a49c341c4e109aa6e9448cc4859af5b949622715a"},
+ {file = "lxml-5.2.2-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:339ee4a4704bc724757cd5dd9dc8cf4d00980f5d3e6e06d5847c1b594ace68ab"},
+ {file = "lxml-5.2.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0a028b61a2e357ace98b1615fc03f76eb517cc028993964fe08ad514b1e8892d"},
+ {file = "lxml-5.2.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:f90e552ecbad426eab352e7b2933091f2be77115bb16f09f78404861c8322981"},
+ {file = "lxml-5.2.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:d83e2d94b69bf31ead2fa45f0acdef0757fa0458a129734f59f67f3d2eb7ef32"},
+ {file = "lxml-5.2.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a02d3c48f9bb1e10c7788d92c0c7db6f2002d024ab6e74d6f45ae33e3d0288a3"},
+ {file = "lxml-5.2.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:6d68ce8e7b2075390e8ac1e1d3a99e8b6372c694bbe612632606d1d546794207"},
+ {file = "lxml-5.2.2-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:453d037e09a5176d92ec0fd282e934ed26d806331a8b70ab431a81e2fbabf56d"},
+ {file = "lxml-5.2.2-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:3b019d4ee84b683342af793b56bb35034bd749e4cbdd3d33f7d1107790f8c472"},
+ {file = "lxml-5.2.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:cb3942960f0beb9f46e2a71a3aca220d1ca32feb5a398656be934320804c0df9"},
+ {file = "lxml-5.2.2-cp39-cp39-win32.whl", hash = "sha256:ac6540c9fff6e3813d29d0403ee7a81897f1d8ecc09a8ff84d2eea70ede1cdbf"},
+ {file = "lxml-5.2.2-cp39-cp39-win_amd64.whl", hash = "sha256:610b5c77428a50269f38a534057444c249976433f40f53e3b47e68349cca1425"},
+ {file = "lxml-5.2.2-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:b537bd04d7ccd7c6350cdaaaad911f6312cbd61e6e6045542f781c7f8b2e99d2"},
+ {file = "lxml-5.2.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4820c02195d6dfb7b8508ff276752f6b2ff8b64ae5d13ebe02e7667e035000b9"},
+ {file = "lxml-5.2.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2a09f6184f17a80897172863a655467da2b11151ec98ba8d7af89f17bf63dae"},
+ {file = "lxml-5.2.2-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:76acba4c66c47d27c8365e7c10b3d8016a7da83d3191d053a58382311a8bf4e1"},
+ {file = "lxml-5.2.2-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b128092c927eaf485928cec0c28f6b8bead277e28acf56800e972aa2c2abd7a2"},
+ {file = "lxml-5.2.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ae791f6bd43305aade8c0e22f816b34f3b72b6c820477aab4d18473a37e8090b"},
+ {file = "lxml-5.2.2-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a2f6a1bc2460e643785a2cde17293bd7a8f990884b822f7bca47bee0a82fc66b"},
+ {file = "lxml-5.2.2-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e8d351ff44c1638cb6e980623d517abd9f580d2e53bfcd18d8941c052a5a009"},
+ {file = "lxml-5.2.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bec4bd9133420c5c52d562469c754f27c5c9e36ee06abc169612c959bd7dbb07"},
+ {file = "lxml-5.2.2-pp37-pypy37_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:55ce6b6d803890bd3cc89975fca9de1dff39729b43b73cb15ddd933b8bc20484"},
+ {file = "lxml-5.2.2-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:8ab6a358d1286498d80fe67bd3d69fcbc7d1359b45b41e74c4a26964ca99c3f8"},
+ {file = "lxml-5.2.2-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:06668e39e1f3c065349c51ac27ae430719d7806c026fec462e5693b08b95696b"},
+ {file = "lxml-5.2.2-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9cd5323344d8ebb9fb5e96da5de5ad4ebab993bbf51674259dbe9d7a18049525"},
+ {file = "lxml-5.2.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89feb82ca055af0fe797a2323ec9043b26bc371365847dbe83c7fd2e2f181c34"},
+ {file = "lxml-5.2.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e481bba1e11ba585fb06db666bfc23dbe181dbafc7b25776156120bf12e0d5a6"},
+ {file = "lxml-5.2.2-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:9d6c6ea6a11ca0ff9cd0390b885984ed31157c168565702959c25e2191674a14"},
+ {file = "lxml-5.2.2-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3d98de734abee23e61f6b8c2e08a88453ada7d6486dc7cdc82922a03968928db"},
+ {file = "lxml-5.2.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:69ab77a1373f1e7563e0fb5a29a8440367dec051da6c7405333699d07444f511"},
+ {file = "lxml-5.2.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:34e17913c431f5ae01d8658dbf792fdc457073dcdfbb31dc0cc6ab256e664a8d"},
+ {file = "lxml-5.2.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05f8757b03208c3f50097761be2dea0aba02e94f0dc7023ed73a7bb14ff11eb0"},
+ {file = "lxml-5.2.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6a520b4f9974b0a0a6ed73c2154de57cdfd0c8800f4f15ab2b73238ffed0b36e"},
+ {file = "lxml-5.2.2-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:5e097646944b66207023bc3c634827de858aebc226d5d4d6d16f0b77566ea182"},
+ {file = "lxml-5.2.2-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b5e4ef22ff25bfd4ede5f8fb30f7b24446345f3e79d9b7455aef2836437bc38a"},
+ {file = "lxml-5.2.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:ff69a9a0b4b17d78170c73abe2ab12084bdf1691550c5629ad1fe7849433f324"},
+ {file = "lxml-5.2.2.tar.gz", hash = "sha256:bb2dc4898180bea79863d5487e5f9c7c34297414bad54bcd0f0852aee9cfdb87"},
]
[package.extras]
cssselect = ["cssselect (>=0.7)"]
+html-clean = ["lxml-html-clean"]
html5 = ["html5lib"]
htmlsoup = ["BeautifulSoup4"]
-source = ["Cython (>=3.0.7)"]
+source = ["Cython (>=3.0.10)"]
[[package]]
name = "lz4"
@@ -5242,42 +5301,42 @@ kerberos = ["requests-kerberos"]
[[package]]
name = "opentelemetry-api"
-version = "1.25.0"
+version = "1.26.0"
description = "OpenTelemetry Python API"
optional = false
python-versions = ">=3.8"
files = [
- {file = "opentelemetry_api-1.25.0-py3-none-any.whl", hash = "sha256:757fa1aa020a0f8fa139f8959e53dec2051cc26b832e76fa839a6d76ecefd737"},
- {file = "opentelemetry_api-1.25.0.tar.gz", hash = "sha256:77c4985f62f2614e42ce77ee4c9da5fa5f0bc1e1821085e9a47533a9323ae869"},
+ {file = "opentelemetry_api-1.26.0-py3-none-any.whl", hash = "sha256:7d7ea33adf2ceda2dd680b18b1677e4152000b37ca76e679da71ff103b943064"},
+ {file = "opentelemetry_api-1.26.0.tar.gz", hash = "sha256:2bd639e4bed5b18486fef0b5a520aaffde5a18fc225e808a1ac4df363f43a1ce"},
]
[package.dependencies]
deprecated = ">=1.2.6"
-importlib-metadata = ">=6.0,<=7.1"
+importlib-metadata = ">=6.0,<=8.0.0"
[[package]]
name = "opentelemetry-exporter-otlp-proto-common"
-version = "1.25.0"
+version = "1.26.0"
description = "OpenTelemetry Protobuf encoding"
optional = false
python-versions = ">=3.8"
files = [
- {file = "opentelemetry_exporter_otlp_proto_common-1.25.0-py3-none-any.whl", hash = "sha256:15637b7d580c2675f70246563363775b4e6de947871e01d0f4e3881d1848d693"},
- {file = "opentelemetry_exporter_otlp_proto_common-1.25.0.tar.gz", hash = "sha256:c93f4e30da4eee02bacd1e004eb82ce4da143a2f8e15b987a9f603e0a85407d3"},
+ {file = "opentelemetry_exporter_otlp_proto_common-1.26.0-py3-none-any.whl", hash = "sha256:ee4d8f8891a1b9c372abf8d109409e5b81947cf66423fd998e56880057afbc71"},
+ {file = "opentelemetry_exporter_otlp_proto_common-1.26.0.tar.gz", hash = "sha256:bdbe50e2e22a1c71acaa0c8ba6efaadd58882e5a5978737a44a4c4b10d304c92"},
]
[package.dependencies]
-opentelemetry-proto = "1.25.0"
+opentelemetry-proto = "1.26.0"
[[package]]
name = "opentelemetry-exporter-otlp-proto-grpc"
-version = "1.25.0"
+version = "1.26.0"
description = "OpenTelemetry Collector Protobuf over gRPC Exporter"
optional = false
python-versions = ">=3.8"
files = [
- {file = "opentelemetry_exporter_otlp_proto_grpc-1.25.0-py3-none-any.whl", hash = "sha256:3131028f0c0a155a64c430ca600fd658e8e37043cb13209f0109db5c1a3e4eb4"},
- {file = "opentelemetry_exporter_otlp_proto_grpc-1.25.0.tar.gz", hash = "sha256:c0b1661415acec5af87625587efa1ccab68b873745ca0ee96b69bb1042087eac"},
+ {file = "opentelemetry_exporter_otlp_proto_grpc-1.26.0-py3-none-any.whl", hash = "sha256:e2be5eff72ebcb010675b818e8d7c2e7d61ec451755b8de67a140bc49b9b0280"},
+ {file = "opentelemetry_exporter_otlp_proto_grpc-1.26.0.tar.gz", hash = "sha256:a65b67a9a6b06ba1ec406114568e21afe88c1cdb29c464f2507d529eb906d8ae"},
]
[package.dependencies]
@@ -5285,19 +5344,19 @@ deprecated = ">=1.2.6"
googleapis-common-protos = ">=1.52,<2.0"
grpcio = ">=1.0.0,<2.0.0"
opentelemetry-api = ">=1.15,<2.0"
-opentelemetry-exporter-otlp-proto-common = "1.25.0"
-opentelemetry-proto = "1.25.0"
-opentelemetry-sdk = ">=1.25.0,<1.26.0"
+opentelemetry-exporter-otlp-proto-common = "1.26.0"
+opentelemetry-proto = "1.26.0"
+opentelemetry-sdk = ">=1.26.0,<1.27.0"
[[package]]
name = "opentelemetry-instrumentation"
-version = "0.46b0"
+version = "0.47b0"
description = "Instrumentation Tools & Auto Instrumentation for OpenTelemetry Python"
optional = false
python-versions = ">=3.8"
files = [
- {file = "opentelemetry_instrumentation-0.46b0-py3-none-any.whl", hash = "sha256:89cd721b9c18c014ca848ccd11181e6b3fd3f6c7669e35d59c48dc527408c18b"},
- {file = "opentelemetry_instrumentation-0.46b0.tar.gz", hash = "sha256:974e0888fb2a1e01c38fbacc9483d024bb1132aad92d6d24e2e5543887a7adda"},
+ {file = "opentelemetry_instrumentation-0.47b0-py3-none-any.whl", hash = "sha256:88974ee52b1db08fc298334b51c19d47e53099c33740e48c4f084bd1afd052d5"},
+ {file = "opentelemetry_instrumentation-0.47b0.tar.gz", hash = "sha256:96f9885e450c35e3f16a4f33145f2ebf620aea910c9fd74a392bbc0f807a350f"},
]
[package.dependencies]
@@ -5307,55 +5366,55 @@ wrapt = ">=1.0.0,<2.0.0"
[[package]]
name = "opentelemetry-instrumentation-asgi"
-version = "0.46b0"
+version = "0.47b0"
description = "ASGI instrumentation for OpenTelemetry"
optional = false
python-versions = ">=3.8"
files = [
- {file = "opentelemetry_instrumentation_asgi-0.46b0-py3-none-any.whl", hash = "sha256:f13c55c852689573057837a9500aeeffc010c4ba59933c322e8f866573374759"},
- {file = "opentelemetry_instrumentation_asgi-0.46b0.tar.gz", hash = "sha256:02559f30cf4b7e2a737ab17eb52aa0779bcf4cc06573064f3e2cb4dcc7d3040a"},
+ {file = "opentelemetry_instrumentation_asgi-0.47b0-py3-none-any.whl", hash = "sha256:b798dc4957b3edc9dfecb47a4c05809036a4b762234c5071212fda39ead80ade"},
+ {file = "opentelemetry_instrumentation_asgi-0.47b0.tar.gz", hash = "sha256:e78b7822c1bca0511e5e9610ec484b8994a81670375e570c76f06f69af7c506a"},
]
[package.dependencies]
asgiref = ">=3.0,<4.0"
opentelemetry-api = ">=1.12,<2.0"
-opentelemetry-instrumentation = "0.46b0"
-opentelemetry-semantic-conventions = "0.46b0"
-opentelemetry-util-http = "0.46b0"
+opentelemetry-instrumentation = "0.47b0"
+opentelemetry-semantic-conventions = "0.47b0"
+opentelemetry-util-http = "0.47b0"
[package.extras]
instruments = ["asgiref (>=3.0,<4.0)"]
[[package]]
name = "opentelemetry-instrumentation-fastapi"
-version = "0.46b0"
+version = "0.47b0"
description = "OpenTelemetry FastAPI Instrumentation"
optional = false
python-versions = ">=3.8"
files = [
- {file = "opentelemetry_instrumentation_fastapi-0.46b0-py3-none-any.whl", hash = "sha256:e0f5d150c6c36833dd011f0e6ef5ede6d7406c1aed0c7c98b2d3b38a018d1b33"},
- {file = "opentelemetry_instrumentation_fastapi-0.46b0.tar.gz", hash = "sha256:928a883a36fc89f9702f15edce43d1a7104da93d740281e32d50ffd03dbb4365"},
+ {file = "opentelemetry_instrumentation_fastapi-0.47b0-py3-none-any.whl", hash = "sha256:5ac28dd401160b02e4f544a85a9e4f61a8cbe5b077ea0379d411615376a2bd21"},
+ {file = "opentelemetry_instrumentation_fastapi-0.47b0.tar.gz", hash = "sha256:0c7c10b5d971e99a420678ffd16c5b1ea4f0db3b31b62faf305fbb03b4ebee36"},
]
[package.dependencies]
opentelemetry-api = ">=1.12,<2.0"
-opentelemetry-instrumentation = "0.46b0"
-opentelemetry-instrumentation-asgi = "0.46b0"
-opentelemetry-semantic-conventions = "0.46b0"
-opentelemetry-util-http = "0.46b0"
+opentelemetry-instrumentation = "0.47b0"
+opentelemetry-instrumentation-asgi = "0.47b0"
+opentelemetry-semantic-conventions = "0.47b0"
+opentelemetry-util-http = "0.47b0"
[package.extras]
-instruments = ["fastapi (>=0.58,<1.0)"]
+instruments = ["fastapi (>=0.58,<1.0)", "fastapi-slim (>=0.111.0,<0.112.0)"]
[[package]]
name = "opentelemetry-proto"
-version = "1.25.0"
+version = "1.26.0"
description = "OpenTelemetry Python Proto"
optional = false
python-versions = ">=3.8"
files = [
- {file = "opentelemetry_proto-1.25.0-py3-none-any.whl", hash = "sha256:f07e3341c78d835d9b86665903b199893befa5e98866f63d22b00d0b7ca4972f"},
- {file = "opentelemetry_proto-1.25.0.tar.gz", hash = "sha256:35b6ef9dc4a9f7853ecc5006738ad40443701e52c26099e197895cbda8b815a3"},
+ {file = "opentelemetry_proto-1.26.0-py3-none-any.whl", hash = "sha256:6c4d7b4d4d9c88543bcf8c28ae3f8f0448a753dc291c18c5390444c90b76a725"},
+ {file = "opentelemetry_proto-1.26.0.tar.gz", hash = "sha256:c5c18796c0cab3751fc3b98dee53855835e90c0422924b484432ac852d93dc1e"},
]
[package.dependencies]
@@ -5363,43 +5422,44 @@ protobuf = ">=3.19,<5.0"
[[package]]
name = "opentelemetry-sdk"
-version = "1.25.0"
+version = "1.26.0"
description = "OpenTelemetry Python SDK"
optional = false
python-versions = ">=3.8"
files = [
- {file = "opentelemetry_sdk-1.25.0-py3-none-any.whl", hash = "sha256:d97ff7ec4b351692e9d5a15af570c693b8715ad78b8aafbec5c7100fe966b4c9"},
- {file = "opentelemetry_sdk-1.25.0.tar.gz", hash = "sha256:ce7fc319c57707ef5bf8b74fb9f8ebdb8bfafbe11898410e0d2a761d08a98ec7"},
+ {file = "opentelemetry_sdk-1.26.0-py3-none-any.whl", hash = "sha256:feb5056a84a88670c041ea0ded9921fca559efec03905dddeb3885525e0af897"},
+ {file = "opentelemetry_sdk-1.26.0.tar.gz", hash = "sha256:c90d2868f8805619535c05562d699e2f4fb1f00dbd55a86dcefca4da6fa02f85"},
]
[package.dependencies]
-opentelemetry-api = "1.25.0"
-opentelemetry-semantic-conventions = "0.46b0"
+opentelemetry-api = "1.26.0"
+opentelemetry-semantic-conventions = "0.47b0"
typing-extensions = ">=3.7.4"
[[package]]
name = "opentelemetry-semantic-conventions"
-version = "0.46b0"
+version = "0.47b0"
description = "OpenTelemetry Semantic Conventions"
optional = false
python-versions = ">=3.8"
files = [
- {file = "opentelemetry_semantic_conventions-0.46b0-py3-none-any.whl", hash = "sha256:6daef4ef9fa51d51855d9f8e0ccd3a1bd59e0e545abe99ac6203804e36ab3e07"},
- {file = "opentelemetry_semantic_conventions-0.46b0.tar.gz", hash = "sha256:fbc982ecbb6a6e90869b15c1673be90bd18c8a56ff1cffc0864e38e2edffaefa"},
+ {file = "opentelemetry_semantic_conventions-0.47b0-py3-none-any.whl", hash = "sha256:4ff9d595b85a59c1c1413f02bba320ce7ea6bf9e2ead2b0913c4395c7bbc1063"},
+ {file = "opentelemetry_semantic_conventions-0.47b0.tar.gz", hash = "sha256:a8d57999bbe3495ffd4d510de26a97dadc1dace53e0275001b2c1b2f67992a7e"},
]
[package.dependencies]
-opentelemetry-api = "1.25.0"
+deprecated = ">=1.2.6"
+opentelemetry-api = "1.26.0"
[[package]]
name = "opentelemetry-util-http"
-version = "0.46b0"
+version = "0.47b0"
description = "Web util for OpenTelemetry"
optional = false
python-versions = ">=3.8"
files = [
- {file = "opentelemetry_util_http-0.46b0-py3-none-any.whl", hash = "sha256:8dc1949ce63caef08db84ae977fdc1848fe6dc38e6bbaad0ae3e6ecd0d451629"},
- {file = "opentelemetry_util_http-0.46b0.tar.gz", hash = "sha256:03b6e222642f9c7eae58d9132343e045b50aca9761fcb53709bd2b663571fdf6"},
+ {file = "opentelemetry_util_http-0.47b0-py3-none-any.whl", hash = "sha256:3d3215e09c4a723b12da6d0233a31395aeb2bb33a64d7b15a1500690ba250f19"},
+ {file = "opentelemetry_util_http-0.47b0.tar.gz", hash = "sha256:352a07664c18eef827eb8ddcbd64c64a7284a39dd1655e2f16f577eb046ccb32"},
]
[[package]]
@@ -5921,22 +5981,22 @@ testing = ["google-api-core (>=1.31.5)"]
[[package]]
name = "protobuf"
-version = "4.25.3"
+version = "4.25.4"
description = ""
optional = false
python-versions = ">=3.8"
files = [
- {file = "protobuf-4.25.3-cp310-abi3-win32.whl", hash = "sha256:d4198877797a83cbfe9bffa3803602bbe1625dc30d8a097365dbc762e5790faa"},
- {file = "protobuf-4.25.3-cp310-abi3-win_amd64.whl", hash = "sha256:209ba4cc916bab46f64e56b85b090607a676f66b473e6b762e6f1d9d591eb2e8"},
- {file = "protobuf-4.25.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:f1279ab38ecbfae7e456a108c5c0681e4956d5b1090027c1de0f934dfdb4b35c"},
- {file = "protobuf-4.25.3-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:e7cb0ae90dd83727f0c0718634ed56837bfeeee29a5f82a7514c03ee1364c019"},
- {file = "protobuf-4.25.3-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:7c8daa26095f82482307bc717364e7c13f4f1c99659be82890dcfc215194554d"},
- {file = "protobuf-4.25.3-cp38-cp38-win32.whl", hash = "sha256:f4f118245c4a087776e0a8408be33cf09f6c547442c00395fbfb116fac2f8ac2"},
- {file = "protobuf-4.25.3-cp38-cp38-win_amd64.whl", hash = "sha256:c053062984e61144385022e53678fbded7aea14ebb3e0305ae3592fb219ccfa4"},
- {file = "protobuf-4.25.3-cp39-cp39-win32.whl", hash = "sha256:19b270aeaa0099f16d3ca02628546b8baefe2955bbe23224aaf856134eccf1e4"},
- {file = "protobuf-4.25.3-cp39-cp39-win_amd64.whl", hash = "sha256:e3c97a1555fd6388f857770ff8b9703083de6bf1f9274a002a332d65fbb56c8c"},
- {file = "protobuf-4.25.3-py3-none-any.whl", hash = "sha256:f0700d54bcf45424477e46a9f0944155b46fb0639d69728739c0e47bab83f2b9"},
- {file = "protobuf-4.25.3.tar.gz", hash = "sha256:25b5d0b42fd000320bd7830b349e3b696435f3b329810427a6bcce6a5492cc5c"},
+ {file = "protobuf-4.25.4-cp310-abi3-win32.whl", hash = "sha256:db9fd45183e1a67722cafa5c1da3e85c6492a5383f127c86c4c4aa4845867dc4"},
+ {file = "protobuf-4.25.4-cp310-abi3-win_amd64.whl", hash = "sha256:ba3d8504116a921af46499471c63a85260c1a5fc23333154a427a310e015d26d"},
+ {file = "protobuf-4.25.4-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:eecd41bfc0e4b1bd3fa7909ed93dd14dd5567b98c941d6c1ad08fdcab3d6884b"},
+ {file = "protobuf-4.25.4-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:4c8a70fdcb995dcf6c8966cfa3a29101916f7225e9afe3ced4395359955d3835"},
+ {file = "protobuf-4.25.4-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:3319e073562e2515c6ddc643eb92ce20809f5d8f10fead3332f71c63be6a7040"},
+ {file = "protobuf-4.25.4-cp38-cp38-win32.whl", hash = "sha256:7e372cbbda66a63ebca18f8ffaa6948455dfecc4e9c1029312f6c2edcd86c4e1"},
+ {file = "protobuf-4.25.4-cp38-cp38-win_amd64.whl", hash = "sha256:051e97ce9fa6067a4546e75cb14f90cf0232dcb3e3d508c448b8d0e4265b61c1"},
+ {file = "protobuf-4.25.4-cp39-cp39-win32.whl", hash = "sha256:90bf6fd378494eb698805bbbe7afe6c5d12c8e17fca817a646cd6a1818c696ca"},
+ {file = "protobuf-4.25.4-cp39-cp39-win_amd64.whl", hash = "sha256:ac79a48d6b99dfed2729ccccee547b34a1d3d63289c71cef056653a846a2240f"},
+ {file = "protobuf-4.25.4-py3-none-any.whl", hash = "sha256:bfbebc1c8e4793cfd58589acfb8a1026be0003e852b9da7db5a4285bde996978"},
+ {file = "protobuf-4.25.4.tar.gz", hash = "sha256:0dc4a62cc4052a036ee2204d26fe4d835c62827c855c8a03f29fe6da146b380d"},
]
[[package]]
@@ -6737,20 +6797,6 @@ files = [
{file = "python_magic-0.4.27-py2.py3-none-any.whl", hash = "sha256:c212960ad306f700aa0d01e5d7a325d20548ff97eb9920dcd29513174f0294d3"},
]
-[[package]]
-name = "python-multipart"
-version = "0.0.9"
-description = "A streaming multipart parser for Python"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "python_multipart-0.0.9-py3-none-any.whl", hash = "sha256:97ca7b8ea7b05f977dc3849c3ba99d51689822fab725c3703af7c866a0c2b215"},
- {file = "python_multipart-0.0.9.tar.gz", hash = "sha256:03f54688c663f1b7977105f021043b0793151e4cb1c1a9d4a11fc13d622c4026"},
-]
-
-[package.extras]
-dev = ["atomicwrites (==1.4.1)", "attrs (==23.2.0)", "coverage (==7.4.1)", "hatch", "invoke (==2.2.0)", "more-itertools (==10.2.0)", "pbr (==6.0.0)", "pluggy (==1.4.0)", "py (==1.11.0)", "pytest (==8.0.0)", "pytest-cov (==4.1.0)", "pytest-timeout (==2.2.0)", "pyyaml (==6.0.1)", "ruff (==0.2.1)"]
-
[[package]]
name = "python-pptx"
version = "0.6.23"
@@ -6814,62 +6860,64 @@ files = [
[[package]]
name = "pyyaml"
-version = "6.0.1"
+version = "6.0.2"
description = "YAML parser and emitter for Python"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.8"
files = [
- {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"},
- {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"},
- {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"},
- {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"},
- {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"},
- {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"},
- {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"},
- {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"},
- {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"},
- {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"},
- {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"},
- {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"},
- {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"},
- {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"},
- {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"},
- {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
- {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
- {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"},
- {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"},
- {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"},
- {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"},
- {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"},
- {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"},
- {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"},
- {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"},
- {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"},
- {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"},
- {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"},
- {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"},
- {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"},
- {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"},
- {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"},
- {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"},
- {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"},
- {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"},
- {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"},
- {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"},
- {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"},
- {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"},
- {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"},
- {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"},
- {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"},
- {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"},
- {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"},
- {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"},
- {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"},
- {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"},
- {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"},
- {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"},
- {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"},
- {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"},
+ {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"},
+ {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"},
+ {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"},
+ {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"},
+ {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"},
+ {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"},
+ {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"},
+ {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"},
+ {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"},
+ {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"},
+ {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"},
+ {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"},
+ {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"},
+ {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"},
+ {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"},
+ {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"},
+ {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"},
+ {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"},
+ {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"},
+ {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"},
+ {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"},
+ {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"},
+ {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"},
+ {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"},
+ {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"},
+ {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"},
+ {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"},
+ {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"},
+ {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"},
+ {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"},
+ {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"},
+ {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"},
+ {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"},
+ {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"},
+ {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"},
+ {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"},
+ {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"},
+ {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"},
+ {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"},
+ {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"},
+ {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"},
+ {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"},
+ {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"},
+ {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"},
+ {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"},
+ {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"},
+ {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"},
+ {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"},
+ {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"},
+ {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"},
+ {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"},
+ {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"},
+ {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"},
]
[[package]]
@@ -6940,104 +6988,119 @@ dev = ["pytest"]
[[package]]
name = "rapidfuzz"
-version = "3.9.4"
+version = "3.9.6"
description = "rapid fuzzy string matching"
optional = false
python-versions = ">=3.8"
files = [
- {file = "rapidfuzz-3.9.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c9b9793c19bdf38656c8eaefbcf4549d798572dadd70581379e666035c9df781"},
- {file = "rapidfuzz-3.9.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:015b5080b999404fe06ec2cb4f40b0be62f0710c926ab41e82dfbc28e80675b4"},
- {file = "rapidfuzz-3.9.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:acc5ceca9c1e1663f3e6c23fb89a311f69b7615a40ddd7645e3435bf3082688a"},
- {file = "rapidfuzz-3.9.4-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1424e238bc3f20e1759db1e0afb48a988a9ece183724bef91ea2a291c0b92a95"},
- {file = "rapidfuzz-3.9.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ed01378f605aa1f449bee82cd9c83772883120d6483e90aa6c5a4ce95dc5c3aa"},
- {file = "rapidfuzz-3.9.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eb26d412271e5a76cdee1c2d6bf9881310665d3fe43b882d0ed24edfcb891a84"},
- {file = "rapidfuzz-3.9.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f37e9e1f17be193c41a31c864ad4cd3ebd2b40780db11cd5c04abf2bcf4201b"},
- {file = "rapidfuzz-3.9.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d070ec5cf96b927c4dc5133c598c7ff6db3b833b363b2919b13417f1002560bc"},
- {file = "rapidfuzz-3.9.4-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:10e61bb7bc807968cef09a0e32ce253711a2d450a4dce7841d21d45330ffdb24"},
- {file = "rapidfuzz-3.9.4-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:31a2fc60bb2c7face4140010a7aeeafed18b4f9cdfa495cc644a68a8c60d1ff7"},
- {file = "rapidfuzz-3.9.4-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:fbebf1791a71a2e89f5c12b78abddc018354d5859e305ec3372fdae14f80a826"},
- {file = "rapidfuzz-3.9.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:aee9fc9e3bb488d040afc590c0a7904597bf4ccd50d1491c3f4a5e7e67e6cd2c"},
- {file = "rapidfuzz-3.9.4-cp310-cp310-win32.whl", hash = "sha256:005a02688a51c7d2451a2d41c79d737aa326ff54167211b78a383fc2aace2c2c"},
- {file = "rapidfuzz-3.9.4-cp310-cp310-win_amd64.whl", hash = "sha256:3a2e75e41ee3274754d3b2163cc6c82cd95b892a85ab031f57112e09da36455f"},
- {file = "rapidfuzz-3.9.4-cp310-cp310-win_arm64.whl", hash = "sha256:2c99d355f37f2b289e978e761f2f8efeedc2b14f4751d9ff7ee344a9a5ca98d9"},
- {file = "rapidfuzz-3.9.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:07141aa6099e39d48637ce72a25b893fc1e433c50b3e837c75d8edf99e0c63e1"},
- {file = "rapidfuzz-3.9.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:db1664eaff5d7d0f2542dd9c25d272478deaf2c8412e4ad93770e2e2d828e175"},
- {file = "rapidfuzz-3.9.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bc01a223f6605737bec3202e94dcb1a449b6c76d46082cfc4aa980f2a60fd40e"},
- {file = "rapidfuzz-3.9.4-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1869c42e73e2a8910b479be204fa736418741b63ea2325f9cc583c30f2ded41a"},
- {file = "rapidfuzz-3.9.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:62ea7007941fb2795fff305ac858f3521ec694c829d5126e8f52a3e92ae75526"},
- {file = "rapidfuzz-3.9.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:698e992436bf7f0afc750690c301215a36ff952a6dcd62882ec13b9a1ebf7a39"},
- {file = "rapidfuzz-3.9.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b76f611935f15a209d3730c360c56b6df8911a9e81e6a38022efbfb96e433bab"},
- {file = "rapidfuzz-3.9.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:129627d730db2e11f76169344a032f4e3883d34f20829419916df31d6d1338b1"},
- {file = "rapidfuzz-3.9.4-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:90a82143c14e9a14b723a118c9ef8d1bbc0c5a16b1ac622a1e6c916caff44dd8"},
- {file = "rapidfuzz-3.9.4-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:ded58612fe3b0e0d06e935eaeaf5a9fd27da8ba9ed3e2596307f40351923bf72"},
- {file = "rapidfuzz-3.9.4-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:f16f5d1c4f02fab18366f2d703391fcdbd87c944ea10736ca1dc3d70d8bd2d8b"},
- {file = "rapidfuzz-3.9.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:26aa7eece23e0df55fb75fbc2a8fb678322e07c77d1fd0e9540496e6e2b5f03e"},
- {file = "rapidfuzz-3.9.4-cp311-cp311-win32.whl", hash = "sha256:f187a9c3b940ce1ee324710626daf72c05599946bd6748abe9e289f1daa9a077"},
- {file = "rapidfuzz-3.9.4-cp311-cp311-win_amd64.whl", hash = "sha256:d8e9130fe5d7c9182990b366ad78fd632f744097e753e08ace573877d67c32f8"},
- {file = "rapidfuzz-3.9.4-cp311-cp311-win_arm64.whl", hash = "sha256:40419e98b10cd6a00ce26e4837a67362f658fc3cd7a71bd8bd25c99f7ee8fea5"},
- {file = "rapidfuzz-3.9.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b5d5072b548db1b313a07d62d88fe0b037bd2783c16607c647e01b070f6cf9e5"},
- {file = "rapidfuzz-3.9.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cf5bcf22e1f0fd273354462631d443ef78d677f7d2fc292de2aec72ae1473e66"},
- {file = "rapidfuzz-3.9.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0c8fc973adde8ed52810f590410e03fb6f0b541bbaeb04c38d77e63442b2df4c"},
- {file = "rapidfuzz-3.9.4-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2464bb120f135293e9a712e342c43695d3d83168907df05f8c4ead1612310c7"},
- {file = "rapidfuzz-3.9.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8d9d58689aca22057cf1a5851677b8a3ccc9b535ca008c7ed06dc6e1899f7844"},
- {file = "rapidfuzz-3.9.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:167e745f98baa0f3034c13583e6302fb69249a01239f1483d68c27abb841e0a1"},
- {file = "rapidfuzz-3.9.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db0bf0663b4b6da1507869722420ea9356b6195aa907228d6201303e69837af9"},
- {file = "rapidfuzz-3.9.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:cd6ac61b74fdb9e23f04d5f068e6cf554f47e77228ca28aa2347a6ca8903972f"},
- {file = "rapidfuzz-3.9.4-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:60ff67c690acecf381759c16cb06c878328fe2361ddf77b25d0e434ea48a29da"},
- {file = "rapidfuzz-3.9.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:cb934363380c60f3a57d14af94325125cd8cded9822611a9f78220444034e36e"},
- {file = "rapidfuzz-3.9.4-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:fe833493fb5cc5682c823ea3e2f7066b07612ee8f61ecdf03e1268f262106cdd"},
- {file = "rapidfuzz-3.9.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2797fb847d89e04040d281cb1902cbeffbc4b5131a5c53fc0db490fd76b2a547"},
- {file = "rapidfuzz-3.9.4-cp312-cp312-win32.whl", hash = "sha256:52e3d89377744dae68ed7c84ad0ddd3f5e891c82d48d26423b9e066fc835cc7c"},
- {file = "rapidfuzz-3.9.4-cp312-cp312-win_amd64.whl", hash = "sha256:c76da20481c906e08400ee9be230f9e611d5931a33707d9df40337c2655c84b5"},
- {file = "rapidfuzz-3.9.4-cp312-cp312-win_arm64.whl", hash = "sha256:f2d2846f3980445864c7e8b8818a29707fcaff2f0261159ef6b7bd27ba139296"},
- {file = "rapidfuzz-3.9.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:355fc4a268ffa07bab88d9adee173783ec8d20136059e028d2a9135c623c44e6"},
- {file = "rapidfuzz-3.9.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4d81a78f90269190b568a8353d4ea86015289c36d7e525cd4d43176c88eff429"},
- {file = "rapidfuzz-3.9.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9e618625ffc4660b26dc8e56225f8b966d5842fa190e70c60db6cd393e25b86e"},
- {file = "rapidfuzz-3.9.4-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b712336ad6f2bacdbc9f1452556e8942269ef71f60a9e6883ef1726b52d9228a"},
- {file = "rapidfuzz-3.9.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84fc1ee19fdad05770c897e793836c002344524301501d71ef2e832847425707"},
- {file = "rapidfuzz-3.9.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1950f8597890c0c707cb7e0416c62a1cf03dcdb0384bc0b2dbda7e05efe738ec"},
- {file = "rapidfuzz-3.9.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a6c35f272ec9c430568dc8c1c30cb873f6bc96be2c79795e0bce6db4e0e101d"},
- {file = "rapidfuzz-3.9.4-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:1df0f9e9239132a231c86ae4f545ec2b55409fa44470692fcfb36b1bd00157ad"},
- {file = "rapidfuzz-3.9.4-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:d2c51955329bfccf99ae26f63d5928bf5be9fcfcd9f458f6847fd4b7e2b8986c"},
- {file = "rapidfuzz-3.9.4-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:3c522f462d9fc504f2ea8d82e44aa580e60566acc754422c829ad75c752fbf8d"},
- {file = "rapidfuzz-3.9.4-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:d8a52fc50ded60d81117d7647f262c529659fb21d23e14ebfd0b35efa4f1b83d"},
- {file = "rapidfuzz-3.9.4-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:04dbdfb0f0bfd3f99cf1e9e24fadc6ded2736d7933f32f1151b0f2abb38f9a25"},
- {file = "rapidfuzz-3.9.4-cp38-cp38-win32.whl", hash = "sha256:4968c8bd1df84b42f382549e6226710ad3476f976389839168db3e68fd373298"},
- {file = "rapidfuzz-3.9.4-cp38-cp38-win_amd64.whl", hash = "sha256:3fe4545f89f8d6c27b6bbbabfe40839624873c08bd6700f63ac36970a179f8f5"},
- {file = "rapidfuzz-3.9.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9f256c8fb8f3125574c8c0c919ab0a1f75d7cba4d053dda2e762dcc36357969d"},
- {file = "rapidfuzz-3.9.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f5fdc09cf6e9d8eac3ce48a4615b3a3ee332ea84ac9657dbbefef913b13e632f"},
- {file = "rapidfuzz-3.9.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d395d46b80063d3b5d13c0af43d2c2cedf3ab48c6a0c2aeec715aa5455b0c632"},
- {file = "rapidfuzz-3.9.4-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7fa714fb96ce9e70c37e64c83b62fe8307030081a0bfae74a76fac7ba0f91715"},
- {file = "rapidfuzz-3.9.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1bc1a0f29f9119be7a8d3c720f1d2068317ae532e39e4f7f948607c3a6de8396"},
- {file = "rapidfuzz-3.9.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6022674aa1747d6300f699cd7c54d7dae89bfe1f84556de699c4ac5df0838082"},
- {file = "rapidfuzz-3.9.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcb72e5f9762fd469701a7e12e94b924af9004954f8c739f925cb19c00862e38"},
- {file = "rapidfuzz-3.9.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:ad04ae301129f0eb5b350a333accd375ce155a0c1cec85ab0ec01f770214e2e4"},
- {file = "rapidfuzz-3.9.4-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:f46a22506f17c0433e349f2d1dc11907c393d9b3601b91d4e334fa9a439a6a4d"},
- {file = "rapidfuzz-3.9.4-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:01b42a8728c36011718da409aa86b84984396bf0ca3bfb6e62624f2014f6022c"},
- {file = "rapidfuzz-3.9.4-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:e590d5d5443cf56f83a51d3c4867bd1f6be8ef8cfcc44279522bcef3845b2a51"},
- {file = "rapidfuzz-3.9.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:4c72078b5fdce34ba5753f9299ae304e282420e6455e043ad08e4488ca13a2b0"},
- {file = "rapidfuzz-3.9.4-cp39-cp39-win32.whl", hash = "sha256:f75639277304e9b75e6a7b3c07042d2264e16740a11e449645689ed28e9c2124"},
- {file = "rapidfuzz-3.9.4-cp39-cp39-win_amd64.whl", hash = "sha256:e81e27e8c32a1e1278a4bb1ce31401bfaa8c2cc697a053b985a6f8d013df83ec"},
- {file = "rapidfuzz-3.9.4-cp39-cp39-win_arm64.whl", hash = "sha256:15bc397ee9a3ed1210b629b9f5f1da809244adc51ce620c504138c6e7095b7bd"},
- {file = "rapidfuzz-3.9.4-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:20488ade4e1ddba3cfad04f400da7a9c1b91eff5b7bd3d1c50b385d78b587f4f"},
- {file = "rapidfuzz-3.9.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:e61b03509b1a6eb31bc5582694f6df837d340535da7eba7bedb8ae42a2fcd0b9"},
- {file = "rapidfuzz-3.9.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:098d231d4e51644d421a641f4a5f2f151f856f53c252b03516e01389b2bfef99"},
- {file = "rapidfuzz-3.9.4-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:17ab8b7d10fde8dd763ad428aa961c0f30a1b44426e675186af8903b5d134fb0"},
- {file = "rapidfuzz-3.9.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e272df61bee0a056a3daf99f9b1bd82cf73ace7d668894788139c868fdf37d6f"},
- {file = "rapidfuzz-3.9.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:d6481e099ff8c4edda85b8b9b5174c200540fd23c8f38120016c765a86fa01f5"},
- {file = "rapidfuzz-3.9.4-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ad61676e9bdae677d577fe80ec1c2cea1d150c86be647e652551dcfe505b1113"},
- {file = "rapidfuzz-3.9.4-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:af65020c0dd48d0d8ae405e7e69b9d8ae306eb9b6249ca8bf511a13f465fad85"},
- {file = "rapidfuzz-3.9.4-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d38b4e026fcd580e0bda6c0ae941e0e9a52c6bc66cdce0b8b0da61e1959f5f8"},
- {file = "rapidfuzz-3.9.4-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f74ed072c2b9dc6743fb19994319d443a4330b0e64aeba0aa9105406c7c5b9c2"},
- {file = "rapidfuzz-3.9.4-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aee5f6b8321f90615c184bd8a4c676e9becda69b8e4e451a90923db719d6857c"},
- {file = "rapidfuzz-3.9.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:3a555e3c841d6efa350f862204bb0a3fea0c006b8acc9b152b374fa36518a1c6"},
- {file = "rapidfuzz-3.9.4-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:0772150d37bf018110351c01d032bf9ab25127b966a29830faa8ad69b7e2f651"},
- {file = "rapidfuzz-3.9.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:addcdd3c3deef1bd54075bd7aba0a6ea9f1d01764a08620074b7a7b1e5447cb9"},
- {file = "rapidfuzz-3.9.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3fe86b82b776554add8f900b6af202b74eb5efe8f25acdb8680a5c977608727f"},
- {file = "rapidfuzz-3.9.4-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b0fc91ac59f4414d8542454dfd6287a154b8e6f1256718c898f695bdbb993467"},
- {file = "rapidfuzz-3.9.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a944e546a296a5fdcaabb537b01459f1b14d66f74e584cb2a91448bffadc3c1"},
- {file = "rapidfuzz-3.9.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:4fb96ba96d58c668a17a06b5b5e8340fedc26188e87b0d229d38104556f30cd8"},
- {file = "rapidfuzz-3.9.4.tar.gz", hash = "sha256:366bf8947b84e37f2f4cf31aaf5f37c39f620d8c0eddb8b633e6ba0129ca4a0a"},
+ {file = "rapidfuzz-3.9.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a7ed0d0b9c85720f0ae33ac5efc8dc3f60c1489dad5c29d735fbdf2f66f0431f"},
+ {file = "rapidfuzz-3.9.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f3deff6ab7017ed21b9aec5874a07ad13e6b2a688af055837f88b743c7bfd947"},
+ {file = "rapidfuzz-3.9.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c3f9fc060160507b2704f7d1491bd58453d69689b580cbc85289335b14fe8ca"},
+ {file = "rapidfuzz-3.9.6-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c4e86c2b3827fa6169ad6e7d4b790ce02a20acefb8b78d92fa4249589bbc7a2c"},
+ {file = "rapidfuzz-3.9.6-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f982e1aafb4bd8207a5e073b1efef9e68a984e91330e1bbf364f9ed157ed83f0"},
+ {file = "rapidfuzz-3.9.6-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9196a51d0ec5eaaaf5bca54a85b7b1e666fc944c332f68e6427503af9fb8c49e"},
+ {file = "rapidfuzz-3.9.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb5a514064e02585b1cc09da2fe406a6dc1a7e5f3e92dd4f27c53e5f1465ec81"},
+ {file = "rapidfuzz-3.9.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e3a4244f65dbc3580b1275480118c3763f9dc29fc3dd96610560cb5e140a4d4a"},
+ {file = "rapidfuzz-3.9.6-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:f6ebb910a702e41641e1e1dada3843bc11ba9107a33c98daef6945a885a40a07"},
+ {file = "rapidfuzz-3.9.6-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:624fbe96115fb39addafa288d583b5493bc76dab1d34d0ebba9987d6871afdf9"},
+ {file = "rapidfuzz-3.9.6-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:1c59f1c1507b7a557cf3c410c76e91f097460da7d97e51c985343798e9df7a3c"},
+ {file = "rapidfuzz-3.9.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f6f0256cb27b6a0fb2e1918477d1b56473cd04acfa245376a342e7c15806a396"},
+ {file = "rapidfuzz-3.9.6-cp310-cp310-win32.whl", hash = "sha256:24d473d00d23a30a85802b502b417a7f5126019c3beec91a6739fe7b95388b24"},
+ {file = "rapidfuzz-3.9.6-cp310-cp310-win_amd64.whl", hash = "sha256:248f6d2612e661e2b5f9a22bbd5862a1600e720da7bb6ad8a55bb1548cdfa423"},
+ {file = "rapidfuzz-3.9.6-cp310-cp310-win_arm64.whl", hash = "sha256:e03fdf0e74f346ed7e798135df5f2a0fb8d6b96582b00ebef202dcf2171e1d1d"},
+ {file = "rapidfuzz-3.9.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:52e4675f642fbc85632f691b67115a243cd4d2a47bdcc4a3d9a79e784518ff97"},
+ {file = "rapidfuzz-3.9.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1f93a2f13038700bd245b927c46a2017db3dcd4d4ff94687d74b5123689b873b"},
+ {file = "rapidfuzz-3.9.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42b70500bca460264b8141d8040caee22e9cf0418c5388104ff0c73fb69ee28f"},
+ {file = "rapidfuzz-3.9.6-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a1e037fb89f714a220f68f902fc6300ab7a33349f3ce8ffae668c3b3a40b0b06"},
+ {file = "rapidfuzz-3.9.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6792f66d59b86ccfad5e247f2912e255c85c575789acdbad8e7f561412ffed8a"},
+ {file = "rapidfuzz-3.9.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:68d9cffe710b67f1969cf996983608cee4490521d96ea91d16bd7ea5dc80ea98"},
+ {file = "rapidfuzz-3.9.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63daaeeea76da17fa0bbe7fb05cba8ed8064bb1a0edf8360636557f8b6511961"},
+ {file = "rapidfuzz-3.9.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d214e063bffa13e3b771520b74f674b22d309b5720d4df9918ff3e0c0f037720"},
+ {file = "rapidfuzz-3.9.6-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:ed443a2062460f44c0346cb9d269b586496b808c2419bbd6057f54061c9b9c75"},
+ {file = "rapidfuzz-3.9.6-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:5b0c9b227ee0076fb2d58301c505bb837a290ae99ee628beacdb719f0626d749"},
+ {file = "rapidfuzz-3.9.6-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:82c9722b7dfaa71e8b61f8c89fed0482567fb69178e139fe4151fc71ed7df782"},
+ {file = "rapidfuzz-3.9.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c18897c95c0a288347e29537b63608a8f63a5c3cb6da258ac46fcf89155e723e"},
+ {file = "rapidfuzz-3.9.6-cp311-cp311-win32.whl", hash = "sha256:3e910cf08944da381159587709daaad9e59d8ff7bca1f788d15928f3c3d49c2a"},
+ {file = "rapidfuzz-3.9.6-cp311-cp311-win_amd64.whl", hash = "sha256:59c4a61fab676d37329fc3a671618a461bfeef53a4d0b8b12e3bc24a14e166f8"},
+ {file = "rapidfuzz-3.9.6-cp311-cp311-win_arm64.whl", hash = "sha256:8b4afea244102332973377fddbe54ce844d0916e1c67a5123432291717f32ffa"},
+ {file = "rapidfuzz-3.9.6-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:70591b28b218fff351b88cdd7f2359a01a71f9f7f5a2e465ce3715ed4b3c422b"},
+ {file = "rapidfuzz-3.9.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ee2d8355c7343c631a03e57540ea06e8717c19ecf5ff64ea07e0498f7f161457"},
+ {file = "rapidfuzz-3.9.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:708fb675de0f47b9635d1cc6fbbf80d52cb710d0a1abbfae5c84c46e3abbddc3"},
+ {file = "rapidfuzz-3.9.6-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d66c247c2d3bb7a9b60567c395a15a929d0ebcc5f4ceedb55bfa202c38c6e0c"},
+ {file = "rapidfuzz-3.9.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:15146301b32e6e3d2b7e8146db1a26747919d8b13690c7f83a4cb5dc111b3a08"},
+ {file = "rapidfuzz-3.9.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a7a03da59b6c7c97e657dd5cd4bcaab5fe4a2affd8193958d6f4d938bee36679"},
+ {file = "rapidfuzz-3.9.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d2c2fe19e392dbc22695b6c3b2510527e2b774647e79936bbde49db7742d6f1"},
+ {file = "rapidfuzz-3.9.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:91aaee4c94cb45930684f583ffc4e7c01a52b46610971cede33586cf8a04a12e"},
+ {file = "rapidfuzz-3.9.6-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3f5702828c10768f9281180a7ff8597da1e5002803e1304e9519dd0f06d79a85"},
+ {file = "rapidfuzz-3.9.6-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:ccd1763b608fb4629a0b08f00b3c099d6395e67c14e619f6341b2c8429c2f310"},
+ {file = "rapidfuzz-3.9.6-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:cc7a0d4b2cb166bc46d02c8c9f7551cde8e2f3c9789df3827309433ee9771163"},
+ {file = "rapidfuzz-3.9.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7496f53d40560a58964207b52586783633f371683834a8f719d6d965d223a2eb"},
+ {file = "rapidfuzz-3.9.6-cp312-cp312-win32.whl", hash = "sha256:5eb1a9272ca71bc72be5415c2fa8448a6302ea4578e181bb7da9db855b367df0"},
+ {file = "rapidfuzz-3.9.6-cp312-cp312-win_amd64.whl", hash = "sha256:0d21fc3c0ca507a1180152a6dbd129ebaef48facde3f943db5c1055b6e6be56a"},
+ {file = "rapidfuzz-3.9.6-cp312-cp312-win_arm64.whl", hash = "sha256:43bb27a57c29dc5fa754496ba6a1a508480d21ae99ac0d19597646c16407e9f3"},
+ {file = "rapidfuzz-3.9.6-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:83a5ac6547a9d6eedaa212975cb8f2ce2aa07e6e30833b40e54a52b9f9999aa4"},
+ {file = "rapidfuzz-3.9.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:10f06139142ecde67078ebc9a745965446132b998f9feebffd71acdf218acfcc"},
+ {file = "rapidfuzz-3.9.6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:74720c3f24597f76c7c3e2c4abdff55f1664f4766ff5b28aeaa689f8ffba5fab"},
+ {file = "rapidfuzz-3.9.6-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ce2bce52b5c150878e558a0418c2b637fb3dbb6eb38e4eb27d24aa839920483e"},
+ {file = "rapidfuzz-3.9.6-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1611199f178793ca9a060c99b284e11f6d7d124998191f1cace9a0245334d219"},
+ {file = "rapidfuzz-3.9.6-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0308b2ad161daf502908a6e21a57c78ded0258eba9a8f5e2545e2dafca312507"},
+ {file = "rapidfuzz-3.9.6-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3eda91832201b86e3b70835f91522587725bec329ec68f2f7faf5124091e5ca7"},
+ {file = "rapidfuzz-3.9.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ece873c093aedd87fc07c2a7e333d52e458dc177016afa1edaf157e82b6914d8"},
+ {file = "rapidfuzz-3.9.6-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d97d3c9d209d5c30172baea5966f2129e8a198fec4a1aeb2f92abb6e82a2edb1"},
+ {file = "rapidfuzz-3.9.6-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:6c4550d0db4931f5ebe9f0678916d1b06f06f5a99ba0b8a48b9457fd8959a7d4"},
+ {file = "rapidfuzz-3.9.6-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:b6b8dd4af6324fc325d9483bec75ecf9be33e590928c9202d408e4eafff6a0a6"},
+ {file = "rapidfuzz-3.9.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:16122ae448bc89e2bea9d81ce6cb0f751e4e07da39bd1e70b95cae2493857853"},
+ {file = "rapidfuzz-3.9.6-cp313-cp313-win32.whl", hash = "sha256:71cc168c305a4445109cd0d4925406f6e66bcb48fde99a1835387c58af4ecfe9"},
+ {file = "rapidfuzz-3.9.6-cp313-cp313-win_amd64.whl", hash = "sha256:59ee78f2ecd53fef8454909cda7400fe2cfcd820f62b8a5d4dfe930102268054"},
+ {file = "rapidfuzz-3.9.6-cp313-cp313-win_arm64.whl", hash = "sha256:58b4ce83f223605c358ae37e7a2d19a41b96aa65b1fede99cc664c9053af89ac"},
+ {file = "rapidfuzz-3.9.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9f469dbc9c4aeaac7dd005992af74b7dff94aa56a3ea063ce64e4b3e6736dd2f"},
+ {file = "rapidfuzz-3.9.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a9ed7ad9adb68d0fe63a156fe752bbf5f1403ed66961551e749641af2874da92"},
+ {file = "rapidfuzz-3.9.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:39ffe48ffbeedf78d120ddfb9d583f2ca906712159a4e9c3c743c9f33e7b1775"},
+ {file = "rapidfuzz-3.9.6-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8502ccdea9084d54b6f737d96a3b60a84e3afed9d016686dc979b49cdac71613"},
+ {file = "rapidfuzz-3.9.6-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6a4bec4956e06b170ca896ba055d08d4c457dac745548172443982956a80e118"},
+ {file = "rapidfuzz-3.9.6-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2c0488b1c273be39e109ff885ccac0448b2fa74dea4c4dc676bcf756c15f16d6"},
+ {file = "rapidfuzz-3.9.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0542c036cb6acf24edd2c9e0411a67d7ba71e29e4d3001a082466b86fc34ff30"},
+ {file = "rapidfuzz-3.9.6-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:0a96b52c9f26857bf009e270dcd829381e7a634f7ddd585fa29b87d4c82146d9"},
+ {file = "rapidfuzz-3.9.6-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:6edd3cd7c4aa8c68c716d349f531bd5011f2ca49ddade216bb4429460151559f"},
+ {file = "rapidfuzz-3.9.6-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:50b2fb55d7ed58c66d49c9f954acd8fc4a3f0e9fd0ff708299bd8abb68238d0e"},
+ {file = "rapidfuzz-3.9.6-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:32848dfe54391636b84cda1823fd23e5a6b1dbb8be0e9a1d80e4ee9903820994"},
+ {file = "rapidfuzz-3.9.6-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:29146cb7a1bf69c87e928b31bffa54f066cb65639d073b36e1425f98cccdebc6"},
+ {file = "rapidfuzz-3.9.6-cp38-cp38-win32.whl", hash = "sha256:aed13e5edacb0ecadcc304cc66e93e7e77ff24f059c9792ee602c0381808e10c"},
+ {file = "rapidfuzz-3.9.6-cp38-cp38-win_amd64.whl", hash = "sha256:af440e36b828922256d0b4d79443bf2cbe5515fc4b0e9e96017ec789b36bb9fc"},
+ {file = "rapidfuzz-3.9.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:efa674b407424553024522159296690d99d6e6b1192cafe99ca84592faff16b4"},
+ {file = "rapidfuzz-3.9.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0b40ff76ee19b03ebf10a0a87938f86814996a822786c41c3312d251b7927849"},
+ {file = "rapidfuzz-3.9.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:16a6c7997cb5927ced6f617122eb116ba514ec6b6f60f4803e7925ef55158891"},
+ {file = "rapidfuzz-3.9.6-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3f42504bdc8d770987fc3d99964766d42b2a03e4d5b0f891decdd256236bae0"},
+ {file = "rapidfuzz-3.9.6-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9462aa2be9f60b540c19a083471fdf28e7cf6434f068b631525b5e6251b35e"},
+ {file = "rapidfuzz-3.9.6-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1629698e68f47609a73bf9e73a6da3a4cac20bc710529215cbdf111ab603665b"},
+ {file = "rapidfuzz-3.9.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68bc7621843d8e9a7fd1b1a32729465bf94b47b6fb307d906da168413331f8d6"},
+ {file = "rapidfuzz-3.9.6-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c6254c50f15bc2fcc33cb93a95a81b702d9e6590f432a7f7822b8c7aba9ae288"},
+ {file = "rapidfuzz-3.9.6-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:7e535a114fa575bc143e175e4ca386a467ec8c42909eff500f5f0f13dc84e3e0"},
+ {file = "rapidfuzz-3.9.6-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:d50acc0e9d67e4ba7a004a14c42d1b1e8b6ca1c515692746f4f8e7948c673167"},
+ {file = "rapidfuzz-3.9.6-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:fa742ec60bec53c5a211632cf1d31b9eb5a3c80f1371a46a23ac25a1fa2ab209"},
+ {file = "rapidfuzz-3.9.6-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c256fa95d29cbe5aa717db790b231a9a5b49e5983d50dc9df29d364a1db5e35b"},
+ {file = "rapidfuzz-3.9.6-cp39-cp39-win32.whl", hash = "sha256:89acbf728b764421036c173a10ada436ecca22999851cdc01d0aa904c70d362d"},
+ {file = "rapidfuzz-3.9.6-cp39-cp39-win_amd64.whl", hash = "sha256:c608fcba8b14d86c04cb56b203fed31a96e8a1ebb4ce99e7b70313c5bf8cf497"},
+ {file = "rapidfuzz-3.9.6-cp39-cp39-win_arm64.whl", hash = "sha256:d41c00ded0e22e9dba88ff23ebe0dc9d2a5f21ba2f88e185ea7374461e61daa9"},
+ {file = "rapidfuzz-3.9.6-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:a65c2f63218ea2dedd56fc56361035e189ca123bd9c9ce63a9bef6f99540d681"},
+ {file = "rapidfuzz-3.9.6-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:680dc78a5f889d3b89f74824b89fe357f49f88ad10d2c121e9c3ad37bac1e4eb"},
+ {file = "rapidfuzz-3.9.6-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b8ca862927a0b05bd825e46ddf82d0724ea44b07d898ef639386530bf9b40f15"},
+ {file = "rapidfuzz-3.9.6-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2116fa1fbff21fa52cd46f3cfcb1e193ba1d65d81f8b6e123193451cd3d6c15e"},
+ {file = "rapidfuzz-3.9.6-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4dcb7d9afd740370a897c15da61d3d57a8d54738d7c764a99cedb5f746d6a003"},
+ {file = "rapidfuzz-3.9.6-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:1a5bd6401bb489e14cbb5981c378d53ede850b7cc84b2464cad606149cc4e17d"},
+ {file = "rapidfuzz-3.9.6-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:29fda70b9d03e29df6fc45cc27cbcc235534b1b0b2900e0a3ae0b43022aaeef5"},
+ {file = "rapidfuzz-3.9.6-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:88144f5f52ae977df9352029488326afadd7a7f42c6779d486d1f82d43b2b1f2"},
+ {file = "rapidfuzz-3.9.6-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:715aeaabafba2709b9dd91acb2a44bad59d60b4616ef90c08f4d4402a3bbca60"},
+ {file = "rapidfuzz-3.9.6-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:af26ebd3714224fbf9bebbc27bdbac14f334c15f5d7043699cd694635050d6ca"},
+ {file = "rapidfuzz-3.9.6-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:101bd2df438861a005ed47c032631b7857dfcdb17b82beeeb410307983aac61d"},
+ {file = "rapidfuzz-3.9.6-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:2185e8e29809b97ad22a7f99281d1669a89bdf5fa1ef4ef1feca36924e675367"},
+ {file = "rapidfuzz-3.9.6-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:9e53c72d08f0e9c6e4a369e52df5971f311305b4487690c62e8dd0846770260c"},
+ {file = "rapidfuzz-3.9.6-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:a0cb157162f0cdd62e538c7bd298ff669847fc43a96422811d5ab933f4c16c3a"},
+ {file = "rapidfuzz-3.9.6-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4bb5ff2bd48132ed5e7fbb8f619885facb2e023759f2519a448b2c18afe07e5d"},
+ {file = "rapidfuzz-3.9.6-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6dc37f601865e8407e3a8037ffbc3afe0b0f837b2146f7632bd29d087385babe"},
+ {file = "rapidfuzz-3.9.6-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a657eee4b94668faf1fa2703bdd803654303f7e468eb9ba10a664d867ed9e779"},
+ {file = "rapidfuzz-3.9.6-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:51be6ab5b1d5bb32abd39718f2a5e3835502e026a8272d139ead295c224a6f5e"},
+ {file = "rapidfuzz-3.9.6.tar.gz", hash = "sha256:5cf2a7d621e4515fee84722e93563bf77ff2cbe832a77a48b81f88f9e23b9e8d"},
]
[package.extras]
@@ -7067,109 +7130,109 @@ test = ["coveralls", "pycodestyle", "pyflakes", "pylint", "pytest", "pytest-benc
[[package]]
name = "redis"
-version = "5.0.7"
+version = "5.0.8"
description = "Python client for Redis database and key-value store"
optional = false
python-versions = ">=3.7"
files = [
- {file = "redis-5.0.7-py3-none-any.whl", hash = "sha256:0e479e24da960c690be5d9b96d21f7b918a98c0cf49af3b6fafaa0753f93a0db"},
- {file = "redis-5.0.7.tar.gz", hash = "sha256:8f611490b93c8109b50adc317b31bfd84fff31def3475b92e7e80bf39f48175b"},
+ {file = "redis-5.0.8-py3-none-any.whl", hash = "sha256:56134ee08ea909106090934adc36f65c9bcbbaecea5b21ba704ba6fb561f8eb4"},
+ {file = "redis-5.0.8.tar.gz", hash = "sha256:0c5b10d387568dfe0698c6fad6615750c24170e548ca2deac10c649d463e9870"},
]
[package.dependencies]
async-timeout = {version = ">=4.0.3", markers = "python_full_version < \"3.11.3\""}
-hiredis = {version = ">=1.0.0", optional = true, markers = "extra == \"hiredis\""}
+hiredis = {version = ">1.0.0", optional = true, markers = "extra == \"hiredis\""}
[package.extras]
-hiredis = ["hiredis (>=1.0.0)"]
+hiredis = ["hiredis (>1.0.0)"]
ocsp = ["cryptography (>=36.0.1)", "pyopenssl (==20.0.1)", "requests (>=2.26.0)"]
[[package]]
name = "regex"
-version = "2024.5.15"
+version = "2024.7.24"
description = "Alternative regular expression module, to replace re."
optional = false
python-versions = ">=3.8"
files = [
- {file = "regex-2024.5.15-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a81e3cfbae20378d75185171587cbf756015ccb14840702944f014e0d93ea09f"},
- {file = "regex-2024.5.15-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7b59138b219ffa8979013be7bc85bb60c6f7b7575df3d56dc1e403a438c7a3f6"},
- {file = "regex-2024.5.15-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a0bd000c6e266927cb7a1bc39d55be95c4b4f65c5be53e659537537e019232b1"},
- {file = "regex-2024.5.15-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5eaa7ddaf517aa095fa8da0b5015c44d03da83f5bd49c87961e3c997daed0de7"},
- {file = "regex-2024.5.15-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ba68168daedb2c0bab7fd7e00ced5ba90aebf91024dea3c88ad5063c2a562cca"},
- {file = "regex-2024.5.15-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6e8d717bca3a6e2064fc3a08df5cbe366369f4b052dcd21b7416e6d71620dca1"},
- {file = "regex-2024.5.15-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1337b7dbef9b2f71121cdbf1e97e40de33ff114801263b275aafd75303bd62b5"},
- {file = "regex-2024.5.15-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f9ebd0a36102fcad2f03696e8af4ae682793a5d30b46c647eaf280d6cfb32796"},
- {file = "regex-2024.5.15-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:9efa1a32ad3a3ea112224897cdaeb6aa00381627f567179c0314f7b65d354c62"},
- {file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:1595f2d10dff3d805e054ebdc41c124753631b6a471b976963c7b28543cf13b0"},
- {file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b802512f3e1f480f41ab5f2cfc0e2f761f08a1f41092d6718868082fc0d27143"},
- {file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:a0981022dccabca811e8171f913de05720590c915b033b7e601f35ce4ea7019f"},
- {file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:19068a6a79cf99a19ccefa44610491e9ca02c2be3305c7760d3831d38a467a6f"},
- {file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:1b5269484f6126eee5e687785e83c6b60aad7663dafe842b34691157e5083e53"},
- {file = "regex-2024.5.15-cp310-cp310-win32.whl", hash = "sha256:ada150c5adfa8fbcbf321c30c751dc67d2f12f15bd183ffe4ec7cde351d945b3"},
- {file = "regex-2024.5.15-cp310-cp310-win_amd64.whl", hash = "sha256:ac394ff680fc46b97487941f5e6ae49a9f30ea41c6c6804832063f14b2a5a145"},
- {file = "regex-2024.5.15-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f5b1dff3ad008dccf18e652283f5e5339d70bf8ba7c98bf848ac33db10f7bc7a"},
- {file = "regex-2024.5.15-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c6a2b494a76983df8e3d3feea9b9ffdd558b247e60b92f877f93a1ff43d26656"},
- {file = "regex-2024.5.15-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a32b96f15c8ab2e7d27655969a23895eb799de3665fa94349f3b2fbfd547236f"},
- {file = "regex-2024.5.15-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:10002e86e6068d9e1c91eae8295ef690f02f913c57db120b58fdd35a6bb1af35"},
- {file = "regex-2024.5.15-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ec54d5afa89c19c6dd8541a133be51ee1017a38b412b1321ccb8d6ddbeb4cf7d"},
- {file = "regex-2024.5.15-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:10e4ce0dca9ae7a66e6089bb29355d4432caed736acae36fef0fdd7879f0b0cb"},
- {file = "regex-2024.5.15-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e507ff1e74373c4d3038195fdd2af30d297b4f0950eeda6f515ae3d84a1770f"},
- {file = "regex-2024.5.15-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d1f059a4d795e646e1c37665b9d06062c62d0e8cc3c511fe01315973a6542e40"},
- {file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0721931ad5fe0dda45d07f9820b90b2148ccdd8e45bb9e9b42a146cb4f695649"},
- {file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:833616ddc75ad595dee848ad984d067f2f31be645d603e4d158bba656bbf516c"},
- {file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:287eb7f54fc81546346207c533ad3c2c51a8d61075127d7f6d79aaf96cdee890"},
- {file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:19dfb1c504781a136a80ecd1fff9f16dddf5bb43cec6871778c8a907a085bb3d"},
- {file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:119af6e56dce35e8dfb5222573b50c89e5508d94d55713c75126b753f834de68"},
- {file = "regex-2024.5.15-cp311-cp311-win32.whl", hash = "sha256:1c1c174d6ec38d6c8a7504087358ce9213d4332f6293a94fbf5249992ba54efa"},
- {file = "regex-2024.5.15-cp311-cp311-win_amd64.whl", hash = "sha256:9e717956dcfd656f5055cc70996ee2cc82ac5149517fc8e1b60261b907740201"},
- {file = "regex-2024.5.15-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:632b01153e5248c134007209b5c6348a544ce96c46005d8456de1d552455b014"},
- {file = "regex-2024.5.15-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e64198f6b856d48192bf921421fdd8ad8eb35e179086e99e99f711957ffedd6e"},
- {file = "regex-2024.5.15-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:68811ab14087b2f6e0fc0c2bae9ad689ea3584cad6917fc57be6a48bbd012c49"},
- {file = "regex-2024.5.15-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8ec0c2fea1e886a19c3bee0cd19d862b3aa75dcdfb42ebe8ed30708df64687a"},
- {file = "regex-2024.5.15-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d0c0c0003c10f54a591d220997dd27d953cd9ccc1a7294b40a4be5312be8797b"},
- {file = "regex-2024.5.15-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2431b9e263af1953c55abbd3e2efca67ca80a3de8a0437cb58e2421f8184717a"},
- {file = "regex-2024.5.15-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a605586358893b483976cffc1723fb0f83e526e8f14c6e6614e75919d9862cf"},
- {file = "regex-2024.5.15-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:391d7f7f1e409d192dba8bcd42d3e4cf9e598f3979cdaed6ab11288da88cb9f2"},
- {file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9ff11639a8d98969c863d4617595eb5425fd12f7c5ef6621a4b74b71ed8726d5"},
- {file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:4eee78a04e6c67e8391edd4dad3279828dd66ac4b79570ec998e2155d2e59fd5"},
- {file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8fe45aa3f4aa57faabbc9cb46a93363edd6197cbc43523daea044e9ff2fea83e"},
- {file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:d0a3d8d6acf0c78a1fff0e210d224b821081330b8524e3e2bc5a68ef6ab5803d"},
- {file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c486b4106066d502495b3025a0a7251bf37ea9540433940a23419461ab9f2a80"},
- {file = "regex-2024.5.15-cp312-cp312-win32.whl", hash = "sha256:c49e15eac7c149f3670b3e27f1f28a2c1ddeccd3a2812cba953e01be2ab9b5fe"},
- {file = "regex-2024.5.15-cp312-cp312-win_amd64.whl", hash = "sha256:673b5a6da4557b975c6c90198588181029c60793835ce02f497ea817ff647cb2"},
- {file = "regex-2024.5.15-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:87e2a9c29e672fc65523fb47a90d429b70ef72b901b4e4b1bd42387caf0d6835"},
- {file = "regex-2024.5.15-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c3bea0ba8b73b71b37ac833a7f3fd53825924165da6a924aec78c13032f20850"},
- {file = "regex-2024.5.15-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:bfc4f82cabe54f1e7f206fd3d30fda143f84a63fe7d64a81558d6e5f2e5aaba9"},
- {file = "regex-2024.5.15-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e5bb9425fe881d578aeca0b2b4b3d314ec88738706f66f219c194d67179337cb"},
- {file = "regex-2024.5.15-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:64c65783e96e563103d641760664125e91bd85d8e49566ee560ded4da0d3e704"},
- {file = "regex-2024.5.15-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cf2430df4148b08fb4324b848672514b1385ae3807651f3567871f130a728cc3"},
- {file = "regex-2024.5.15-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5397de3219a8b08ae9540c48f602996aa6b0b65d5a61683e233af8605c42b0f2"},
- {file = "regex-2024.5.15-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:455705d34b4154a80ead722f4f185b04c4237e8e8e33f265cd0798d0e44825fa"},
- {file = "regex-2024.5.15-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b2b6f1b3bb6f640c1a92be3bbfbcb18657b125b99ecf141fb3310b5282c7d4ed"},
- {file = "regex-2024.5.15-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:3ad070b823ca5890cab606c940522d05d3d22395d432f4aaaf9d5b1653e47ced"},
- {file = "regex-2024.5.15-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:5b5467acbfc153847d5adb21e21e29847bcb5870e65c94c9206d20eb4e99a384"},
- {file = "regex-2024.5.15-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:e6662686aeb633ad65be2a42b4cb00178b3fbf7b91878f9446075c404ada552f"},
- {file = "regex-2024.5.15-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:2b4c884767504c0e2401babe8b5b7aea9148680d2e157fa28f01529d1f7fcf67"},
- {file = "regex-2024.5.15-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:3cd7874d57f13bf70078f1ff02b8b0aa48d5b9ed25fc48547516c6aba36f5741"},
- {file = "regex-2024.5.15-cp38-cp38-win32.whl", hash = "sha256:e4682f5ba31f475d58884045c1a97a860a007d44938c4c0895f41d64481edbc9"},
- {file = "regex-2024.5.15-cp38-cp38-win_amd64.whl", hash = "sha256:d99ceffa25ac45d150e30bd9ed14ec6039f2aad0ffa6bb87a5936f5782fc1569"},
- {file = "regex-2024.5.15-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:13cdaf31bed30a1e1c2453ef6015aa0983e1366fad2667657dbcac7b02f67133"},
- {file = "regex-2024.5.15-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cac27dcaa821ca271855a32188aa61d12decb6fe45ffe3e722401fe61e323cd1"},
- {file = "regex-2024.5.15-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7dbe2467273b875ea2de38ded4eba86cbcbc9a1a6d0aa11dcf7bd2e67859c435"},
- {file = "regex-2024.5.15-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:64f18a9a3513a99c4bef0e3efd4c4a5b11228b48aa80743be822b71e132ae4f5"},
- {file = "regex-2024.5.15-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d347a741ea871c2e278fde6c48f85136c96b8659b632fb57a7d1ce1872547600"},
- {file = "regex-2024.5.15-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1878b8301ed011704aea4c806a3cadbd76f84dece1ec09cc9e4dc934cfa5d4da"},
- {file = "regex-2024.5.15-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4babf07ad476aaf7830d77000874d7611704a7fcf68c9c2ad151f5d94ae4bfc4"},
- {file = "regex-2024.5.15-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:35cb514e137cb3488bce23352af3e12fb0dbedd1ee6e60da053c69fb1b29cc6c"},
- {file = "regex-2024.5.15-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:cdd09d47c0b2efee9378679f8510ee6955d329424c659ab3c5e3a6edea696294"},
- {file = "regex-2024.5.15-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:72d7a99cd6b8f958e85fc6ca5b37c4303294954eac1376535b03c2a43eb72629"},
- {file = "regex-2024.5.15-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:a094801d379ab20c2135529948cb84d417a2169b9bdceda2a36f5f10977ebc16"},
- {file = "regex-2024.5.15-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:c0c18345010870e58238790a6779a1219b4d97bd2e77e1140e8ee5d14df071aa"},
- {file = "regex-2024.5.15-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:16093f563098448ff6b1fa68170e4acbef94e6b6a4e25e10eae8598bb1694b5d"},
- {file = "regex-2024.5.15-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e38a7d4e8f633a33b4c7350fbd8bad3b70bf81439ac67ac38916c4a86b465456"},
- {file = "regex-2024.5.15-cp39-cp39-win32.whl", hash = "sha256:71a455a3c584a88f654b64feccc1e25876066c4f5ef26cd6dd711308aa538694"},
- {file = "regex-2024.5.15-cp39-cp39-win_amd64.whl", hash = "sha256:cab12877a9bdafde5500206d1020a584355a97884dfd388af3699e9137bf7388"},
- {file = "regex-2024.5.15.tar.gz", hash = "sha256:d3ee02d9e5f482cc8309134a91eeaacbdd2261ba111b0fef3748eeb4913e6a2c"},
+ {file = "regex-2024.7.24-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:228b0d3f567fafa0633aee87f08b9276c7062da9616931382993c03808bb68ce"},
+ {file = "regex-2024.7.24-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3426de3b91d1bc73249042742f45c2148803c111d1175b283270177fdf669024"},
+ {file = "regex-2024.7.24-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f273674b445bcb6e4409bf8d1be67bc4b58e8b46fd0d560055d515b8830063cd"},
+ {file = "regex-2024.7.24-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23acc72f0f4e1a9e6e9843d6328177ae3074b4182167e34119ec7233dfeccf53"},
+ {file = "regex-2024.7.24-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65fd3d2e228cae024c411c5ccdffae4c315271eee4a8b839291f84f796b34eca"},
+ {file = "regex-2024.7.24-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c414cbda77dbf13c3bc88b073a1a9f375c7b0cb5e115e15d4b73ec3a2fbc6f59"},
+ {file = "regex-2024.7.24-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf7a89eef64b5455835f5ed30254ec19bf41f7541cd94f266ab7cbd463f00c41"},
+ {file = "regex-2024.7.24-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:19c65b00d42804e3fbea9708f0937d157e53429a39b7c61253ff15670ff62cb5"},
+ {file = "regex-2024.7.24-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:7a5486ca56c8869070a966321d5ab416ff0f83f30e0e2da1ab48815c8d165d46"},
+ {file = "regex-2024.7.24-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6f51f9556785e5a203713f5efd9c085b4a45aecd2a42573e2b5041881b588d1f"},
+ {file = "regex-2024.7.24-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:a4997716674d36a82eab3e86f8fa77080a5d8d96a389a61ea1d0e3a94a582cf7"},
+ {file = "regex-2024.7.24-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:c0abb5e4e8ce71a61d9446040c1e86d4e6d23f9097275c5bd49ed978755ff0fe"},
+ {file = "regex-2024.7.24-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:18300a1d78cf1290fa583cd8b7cde26ecb73e9f5916690cf9d42de569c89b1ce"},
+ {file = "regex-2024.7.24-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:416c0e4f56308f34cdb18c3f59849479dde5b19febdcd6e6fa4d04b6c31c9faa"},
+ {file = "regex-2024.7.24-cp310-cp310-win32.whl", hash = "sha256:fb168b5924bef397b5ba13aabd8cf5df7d3d93f10218d7b925e360d436863f66"},
+ {file = "regex-2024.7.24-cp310-cp310-win_amd64.whl", hash = "sha256:6b9fc7e9cc983e75e2518496ba1afc524227c163e43d706688a6bb9eca41617e"},
+ {file = "regex-2024.7.24-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:382281306e3adaaa7b8b9ebbb3ffb43358a7bbf585fa93821300a418bb975281"},
+ {file = "regex-2024.7.24-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4fdd1384619f406ad9037fe6b6eaa3de2749e2e12084abc80169e8e075377d3b"},
+ {file = "regex-2024.7.24-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3d974d24edb231446f708c455fd08f94c41c1ff4f04bcf06e5f36df5ef50b95a"},
+ {file = "regex-2024.7.24-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a2ec4419a3fe6cf8a4795752596dfe0adb4aea40d3683a132bae9c30b81e8d73"},
+ {file = "regex-2024.7.24-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eb563dd3aea54c797adf513eeec819c4213d7dbfc311874eb4fd28d10f2ff0f2"},
+ {file = "regex-2024.7.24-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:45104baae8b9f67569f0f1dca5e1f1ed77a54ae1cd8b0b07aba89272710db61e"},
+ {file = "regex-2024.7.24-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:994448ee01864501912abf2bad9203bffc34158e80fe8bfb5b031f4f8e16da51"},
+ {file = "regex-2024.7.24-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3fac296f99283ac232d8125be932c5cd7644084a30748fda013028c815ba3364"},
+ {file = "regex-2024.7.24-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:7e37e809b9303ec3a179085415cb5f418ecf65ec98cdfe34f6a078b46ef823ee"},
+ {file = "regex-2024.7.24-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:01b689e887f612610c869421241e075c02f2e3d1ae93a037cb14f88ab6a8934c"},
+ {file = "regex-2024.7.24-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f6442f0f0ff81775eaa5b05af8a0ffa1dda36e9cf6ec1e0d3d245e8564b684ce"},
+ {file = "regex-2024.7.24-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:871e3ab2838fbcb4e0865a6e01233975df3a15e6fce93b6f99d75cacbd9862d1"},
+ {file = "regex-2024.7.24-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c918b7a1e26b4ab40409820ddccc5d49871a82329640f5005f73572d5eaa9b5e"},
+ {file = "regex-2024.7.24-cp311-cp311-win32.whl", hash = "sha256:2dfbb8baf8ba2c2b9aa2807f44ed272f0913eeeba002478c4577b8d29cde215c"},
+ {file = "regex-2024.7.24-cp311-cp311-win_amd64.whl", hash = "sha256:538d30cd96ed7d1416d3956f94d54e426a8daf7c14527f6e0d6d425fcb4cca52"},
+ {file = "regex-2024.7.24-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:fe4ebef608553aff8deb845c7f4f1d0740ff76fa672c011cc0bacb2a00fbde86"},
+ {file = "regex-2024.7.24-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:74007a5b25b7a678459f06559504f1eec2f0f17bca218c9d56f6a0a12bfffdad"},
+ {file = "regex-2024.7.24-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7df9ea48641da022c2a3c9c641650cd09f0cd15e8908bf931ad538f5ca7919c9"},
+ {file = "regex-2024.7.24-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a1141a1dcc32904c47f6846b040275c6e5de0bf73f17d7a409035d55b76f289"},
+ {file = "regex-2024.7.24-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80c811cfcb5c331237d9bad3bea2c391114588cf4131707e84d9493064d267f9"},
+ {file = "regex-2024.7.24-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7214477bf9bd195894cf24005b1e7b496f46833337b5dedb7b2a6e33f66d962c"},
+ {file = "regex-2024.7.24-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d55588cba7553f0b6ec33130bc3e114b355570b45785cebdc9daed8c637dd440"},
+ {file = "regex-2024.7.24-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:558a57cfc32adcf19d3f791f62b5ff564922942e389e3cfdb538a23d65a6b610"},
+ {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a512eed9dfd4117110b1881ba9a59b31433caed0c4101b361f768e7bcbaf93c5"},
+ {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:86b17ba823ea76256b1885652e3a141a99a5c4422f4a869189db328321b73799"},
+ {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5eefee9bfe23f6df09ffb6dfb23809f4d74a78acef004aa904dc7c88b9944b05"},
+ {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:731fcd76bbdbf225e2eb85b7c38da9633ad3073822f5ab32379381e8c3c12e94"},
+ {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:eaef80eac3b4cfbdd6de53c6e108b4c534c21ae055d1dbea2de6b3b8ff3def38"},
+ {file = "regex-2024.7.24-cp312-cp312-win32.whl", hash = "sha256:185e029368d6f89f36e526764cf12bf8d6f0e3a2a7737da625a76f594bdfcbfc"},
+ {file = "regex-2024.7.24-cp312-cp312-win_amd64.whl", hash = "sha256:2f1baff13cc2521bea83ab2528e7a80cbe0ebb2c6f0bfad15be7da3aed443908"},
+ {file = "regex-2024.7.24-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:66b4c0731a5c81921e938dcf1a88e978264e26e6ac4ec96a4d21ae0354581ae0"},
+ {file = "regex-2024.7.24-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:88ecc3afd7e776967fa16c80f974cb79399ee8dc6c96423321d6f7d4b881c92b"},
+ {file = "regex-2024.7.24-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:64bd50cf16bcc54b274e20235bf8edbb64184a30e1e53873ff8d444e7ac656b2"},
+ {file = "regex-2024.7.24-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb462f0e346fcf41a901a126b50f8781e9a474d3927930f3490f38a6e73b6950"},
+ {file = "regex-2024.7.24-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a82465ebbc9b1c5c50738536fdfa7cab639a261a99b469c9d4c7dcbb2b3f1e57"},
+ {file = "regex-2024.7.24-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:68a8f8c046c6466ac61a36b65bb2395c74451df2ffb8458492ef49900efed293"},
+ {file = "regex-2024.7.24-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dac8e84fff5d27420f3c1e879ce9929108e873667ec87e0c8eeb413a5311adfe"},
+ {file = "regex-2024.7.24-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ba2537ef2163db9e6ccdbeb6f6424282ae4dea43177402152c67ef869cf3978b"},
+ {file = "regex-2024.7.24-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:43affe33137fcd679bdae93fb25924979517e011f9dea99163f80b82eadc7e53"},
+ {file = "regex-2024.7.24-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:c9bb87fdf2ab2370f21e4d5636e5317775e5d51ff32ebff2cf389f71b9b13750"},
+ {file = "regex-2024.7.24-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:945352286a541406f99b2655c973852da7911b3f4264e010218bbc1cc73168f2"},
+ {file = "regex-2024.7.24-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:8bc593dcce679206b60a538c302d03c29b18e3d862609317cb560e18b66d10cf"},
+ {file = "regex-2024.7.24-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:3f3b6ca8eae6d6c75a6cff525c8530c60e909a71a15e1b731723233331de4169"},
+ {file = "regex-2024.7.24-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c51edc3541e11fbe83f0c4d9412ef6c79f664a3745fab261457e84465ec9d5a8"},
+ {file = "regex-2024.7.24-cp38-cp38-win32.whl", hash = "sha256:d0a07763776188b4db4c9c7fb1b8c494049f84659bb387b71c73bbc07f189e96"},
+ {file = "regex-2024.7.24-cp38-cp38-win_amd64.whl", hash = "sha256:8fd5afd101dcf86a270d254364e0e8dddedebe6bd1ab9d5f732f274fa00499a5"},
+ {file = "regex-2024.7.24-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:0ffe3f9d430cd37d8fa5632ff6fb36d5b24818c5c986893063b4e5bdb84cdf24"},
+ {file = "regex-2024.7.24-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:25419b70ba00a16abc90ee5fce061228206173231f004437730b67ac77323f0d"},
+ {file = "regex-2024.7.24-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:33e2614a7ce627f0cdf2ad104797d1f68342d967de3695678c0cb84f530709f8"},
+ {file = "regex-2024.7.24-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d33a0021893ede5969876052796165bab6006559ab845fd7b515a30abdd990dc"},
+ {file = "regex-2024.7.24-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:04ce29e2c5fedf296b1a1b0acc1724ba93a36fb14031f3abfb7abda2806c1535"},
+ {file = "regex-2024.7.24-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b16582783f44fbca6fcf46f61347340c787d7530d88b4d590a397a47583f31dd"},
+ {file = "regex-2024.7.24-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:836d3cc225b3e8a943d0b02633fb2f28a66e281290302a79df0e1eaa984ff7c1"},
+ {file = "regex-2024.7.24-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:438d9f0f4bc64e8dea78274caa5af971ceff0f8771e1a2333620969936ba10be"},
+ {file = "regex-2024.7.24-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:973335b1624859cb0e52f96062a28aa18f3a5fc77a96e4a3d6d76e29811a0e6e"},
+ {file = "regex-2024.7.24-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c5e69fd3eb0b409432b537fe3c6f44ac089c458ab6b78dcec14478422879ec5f"},
+ {file = "regex-2024.7.24-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:fbf8c2f00904eaf63ff37718eb13acf8e178cb940520e47b2f05027f5bb34ce3"},
+ {file = "regex-2024.7.24-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ae2757ace61bc4061b69af19e4689fa4416e1a04840f33b441034202b5cd02d4"},
+ {file = "regex-2024.7.24-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:44fc61b99035fd9b3b9453f1713234e5a7c92a04f3577252b45feefe1b327759"},
+ {file = "regex-2024.7.24-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:84c312cdf839e8b579f504afcd7b65f35d60b6285d892b19adea16355e8343c9"},
+ {file = "regex-2024.7.24-cp39-cp39-win32.whl", hash = "sha256:ca5b2028c2f7af4e13fb9fc29b28d0ce767c38c7facdf64f6c2cd040413055f1"},
+ {file = "regex-2024.7.24-cp39-cp39-win_amd64.whl", hash = "sha256:7c479f5ae937ec9985ecaf42e2e10631551d909f203e31308c12d703922742f9"},
+ {file = "regex-2024.7.24.tar.gz", hash = "sha256:9cfd009eed1a46b27c14039ad5bbc5e71b6367c5b2e6d5f5da0ea91600817506"},
]
[[package]]
@@ -7307,29 +7370,29 @@ pyasn1 = ">=0.1.3"
[[package]]
name = "ruff"
-version = "0.5.4"
+version = "0.5.7"
description = "An extremely fast Python linter and code formatter, written in Rust."
optional = false
python-versions = ">=3.7"
files = [
- {file = "ruff-0.5.4-py3-none-linux_armv6l.whl", hash = "sha256:82acef724fc639699b4d3177ed5cc14c2a5aacd92edd578a9e846d5b5ec18ddf"},
- {file = "ruff-0.5.4-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:da62e87637c8838b325e65beee485f71eb36202ce8e3cdbc24b9fcb8b99a37be"},
- {file = "ruff-0.5.4-py3-none-macosx_11_0_arm64.whl", hash = "sha256:e98ad088edfe2f3b85a925ee96da652028f093d6b9b56b76fc242d8abb8e2059"},
- {file = "ruff-0.5.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4c55efbecc3152d614cfe6c2247a3054cfe358cefbf794f8c79c8575456efe19"},
- {file = "ruff-0.5.4-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f9b85eaa1f653abd0a70603b8b7008d9e00c9fa1bbd0bf40dad3f0c0bdd06793"},
- {file = "ruff-0.5.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0cf497a47751be8c883059c4613ba2f50dd06ec672692de2811f039432875278"},
- {file = "ruff-0.5.4-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:09c14ed6a72af9ccc8d2e313d7acf7037f0faff43cde4b507e66f14e812e37f7"},
- {file = "ruff-0.5.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:628f6b8f97b8bad2490240aa84f3e68f390e13fabc9af5c0d3b96b485921cd60"},
- {file = "ruff-0.5.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3520a00c0563d7a7a7c324ad7e2cde2355733dafa9592c671fb2e9e3cd8194c1"},
- {file = "ruff-0.5.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93789f14ca2244fb91ed481456f6d0bb8af1f75a330e133b67d08f06ad85b516"},
- {file = "ruff-0.5.4-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:029454e2824eafa25b9df46882f7f7844d36fd8ce51c1b7f6d97e2615a57bbcc"},
- {file = "ruff-0.5.4-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:9492320eed573a13a0bc09a2957f17aa733fff9ce5bf00e66e6d4a88ec33813f"},
- {file = "ruff-0.5.4-py3-none-musllinux_1_2_i686.whl", hash = "sha256:a6e1f62a92c645e2919b65c02e79d1f61e78a58eddaebca6c23659e7c7cb4ac7"},
- {file = "ruff-0.5.4-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:768fa9208df2bec4b2ce61dbc7c2ddd6b1be9fb48f1f8d3b78b3332c7d71c1ff"},
- {file = "ruff-0.5.4-py3-none-win32.whl", hash = "sha256:e1e7393e9c56128e870b233c82ceb42164966f25b30f68acbb24ed69ce9c3a4e"},
- {file = "ruff-0.5.4-py3-none-win_amd64.whl", hash = "sha256:58b54459221fd3f661a7329f177f091eb35cf7a603f01d9eb3eb11cc348d38c4"},
- {file = "ruff-0.5.4-py3-none-win_arm64.whl", hash = "sha256:bd53da65f1085fb5b307c38fd3c0829e76acf7b2a912d8d79cadcdb4875c1eb7"},
- {file = "ruff-0.5.4.tar.gz", hash = "sha256:2795726d5f71c4f4e70653273d1c23a8182f07dd8e48c12de5d867bfb7557eed"},
+ {file = "ruff-0.5.7-py3-none-linux_armv6l.whl", hash = "sha256:548992d342fc404ee2e15a242cdbea4f8e39a52f2e7752d0e4cbe88d2d2f416a"},
+ {file = "ruff-0.5.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:00cc8872331055ee017c4f1071a8a31ca0809ccc0657da1d154a1d2abac5c0be"},
+ {file = "ruff-0.5.7-py3-none-macosx_11_0_arm64.whl", hash = "sha256:eaf3d86a1fdac1aec8a3417a63587d93f906c678bb9ed0b796da7b59c1114a1e"},
+ {file = "ruff-0.5.7-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a01c34400097b06cf8a6e61b35d6d456d5bd1ae6961542de18ec81eaf33b4cb8"},
+ {file = "ruff-0.5.7-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fcc8054f1a717e2213500edaddcf1dbb0abad40d98e1bd9d0ad364f75c763eea"},
+ {file = "ruff-0.5.7-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7f70284e73f36558ef51602254451e50dd6cc479f8b6f8413a95fcb5db4a55fc"},
+ {file = "ruff-0.5.7-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:a78ad870ae3c460394fc95437d43deb5c04b5c29297815a2a1de028903f19692"},
+ {file = "ruff-0.5.7-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9ccd078c66a8e419475174bfe60a69adb36ce04f8d4e91b006f1329d5cd44bcf"},
+ {file = "ruff-0.5.7-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e31c9bad4ebf8fdb77b59cae75814440731060a09a0e0077d559a556453acbb"},
+ {file = "ruff-0.5.7-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d796327eed8e168164346b769dd9a27a70e0298d667b4ecee6877ce8095ec8e"},
+ {file = "ruff-0.5.7-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:4a09ea2c3f7778cc635e7f6edf57d566a8ee8f485f3c4454db7771efb692c499"},
+ {file = "ruff-0.5.7-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:a36d8dcf55b3a3bc353270d544fb170d75d2dff41eba5df57b4e0b67a95bb64e"},
+ {file = "ruff-0.5.7-py3-none-musllinux_1_2_i686.whl", hash = "sha256:9369c218f789eefbd1b8d82a8cf25017b523ac47d96b2f531eba73770971c9e5"},
+ {file = "ruff-0.5.7-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:b88ca3db7eb377eb24fb7c82840546fb7acef75af4a74bd36e9ceb37a890257e"},
+ {file = "ruff-0.5.7-py3-none-win32.whl", hash = "sha256:33d61fc0e902198a3e55719f4be6b375b28f860b09c281e4bdbf783c0566576a"},
+ {file = "ruff-0.5.7-py3-none-win_amd64.whl", hash = "sha256:083bbcbe6fadb93cd86709037acc510f86eed5a314203079df174c40bbbca6b3"},
+ {file = "ruff-0.5.7-py3-none-win_arm64.whl", hash = "sha256:2dca26154ff9571995107221d0aeaad0e75a77b5a682d6236cf89a58c70b76f4"},
+ {file = "ruff-0.5.7.tar.gz", hash = "sha256:8dfc0a458797f5d9fb622dd0efc52d796f23f0a1493a9527f4e49a550ae9a7e5"},
]
[[package]]
@@ -7351,111 +7414,121 @@ crt = ["botocore[crt] (>=1.33.2,<2.0a.0)"]
[[package]]
name = "safetensors"
-version = "0.4.3"
+version = "0.4.4"
description = ""
optional = false
python-versions = ">=3.7"
files = [
- {file = "safetensors-0.4.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:dcf5705cab159ce0130cd56057f5f3425023c407e170bca60b4868048bae64fd"},
- {file = "safetensors-0.4.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:bb4f8c5d0358a31e9a08daeebb68f5e161cdd4018855426d3f0c23bb51087055"},
- {file = "safetensors-0.4.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70a5319ef409e7f88686a46607cbc3c428271069d8b770076feaf913664a07ac"},
- {file = "safetensors-0.4.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fb9c65bd82f9ef3ce4970dc19ee86be5f6f93d032159acf35e663c6bea02b237"},
- {file = "safetensors-0.4.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:edb5698a7bc282089f64c96c477846950358a46ede85a1c040e0230344fdde10"},
- {file = "safetensors-0.4.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:efcc860be094b8d19ac61b452ec635c7acb9afa77beb218b1d7784c6d41fe8ad"},
- {file = "safetensors-0.4.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d88b33980222085dd6001ae2cad87c6068e0991d4f5ccf44975d216db3b57376"},
- {file = "safetensors-0.4.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5fc6775529fb9f0ce2266edd3e5d3f10aab068e49f765e11f6f2a63b5367021d"},
- {file = "safetensors-0.4.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9c6ad011c1b4e3acff058d6b090f1da8e55a332fbf84695cf3100c649cc452d1"},
- {file = "safetensors-0.4.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8c496c5401c1b9c46d41a7688e8ff5b0310a3b9bae31ce0f0ae870e1ea2b8caf"},
- {file = "safetensors-0.4.3-cp310-none-win32.whl", hash = "sha256:38e2a8666178224a51cca61d3cb4c88704f696eac8f72a49a598a93bbd8a4af9"},
- {file = "safetensors-0.4.3-cp310-none-win_amd64.whl", hash = "sha256:393e6e391467d1b2b829c77e47d726f3b9b93630e6a045b1d1fca67dc78bf632"},
- {file = "safetensors-0.4.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:22f3b5d65e440cec0de8edaa672efa888030802e11c09b3d6203bff60ebff05a"},
- {file = "safetensors-0.4.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7c4fa560ebd4522adddb71dcd25d09bf211b5634003f015a4b815b7647d62ebe"},
- {file = "safetensors-0.4.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e9afd5358719f1b2cf425fad638fc3c887997d6782da317096877e5b15b2ce93"},
- {file = "safetensors-0.4.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d8c5093206ef4b198600ae484230402af6713dab1bd5b8e231905d754022bec7"},
- {file = "safetensors-0.4.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e0b2104df1579d6ba9052c0ae0e3137c9698b2d85b0645507e6fd1813b70931a"},
- {file = "safetensors-0.4.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8cf18888606dad030455d18f6c381720e57fc6a4170ee1966adb7ebc98d4d6a3"},
- {file = "safetensors-0.4.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0bf4f9d6323d9f86eef5567eabd88f070691cf031d4c0df27a40d3b4aaee755b"},
- {file = "safetensors-0.4.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:585c9ae13a205807b63bef8a37994f30c917ff800ab8a1ca9c9b5d73024f97ee"},
- {file = "safetensors-0.4.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:faefeb3b81bdfb4e5a55b9bbdf3d8d8753f65506e1d67d03f5c851a6c87150e9"},
- {file = "safetensors-0.4.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:befdf0167ad626f22f6aac6163477fcefa342224a22f11fdd05abb3995c1783c"},
- {file = "safetensors-0.4.3-cp311-none-win32.whl", hash = "sha256:a7cef55929dcbef24af3eb40bedec35d82c3c2fa46338bb13ecf3c5720af8a61"},
- {file = "safetensors-0.4.3-cp311-none-win_amd64.whl", hash = "sha256:840b7ac0eff5633e1d053cc9db12fdf56b566e9403b4950b2dc85393d9b88d67"},
- {file = "safetensors-0.4.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:22d21760dc6ebae42e9c058d75aa9907d9f35e38f896e3c69ba0e7b213033856"},
- {file = "safetensors-0.4.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d22c1a10dff3f64d0d68abb8298a3fd88ccff79f408a3e15b3e7f637ef5c980"},
- {file = "safetensors-0.4.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1648568667f820b8c48317c7006221dc40aced1869908c187f493838a1362bc"},
- {file = "safetensors-0.4.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:446e9fe52c051aeab12aac63d1017e0f68a02a92a027b901c4f8e931b24e5397"},
- {file = "safetensors-0.4.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fef5d70683643618244a4f5221053567ca3e77c2531e42ad48ae05fae909f542"},
- {file = "safetensors-0.4.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a1f4430cc0c9d6afa01214a4b3919d0a029637df8e09675ceef1ca3f0dfa0df"},
- {file = "safetensors-0.4.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2d603846a8585b9432a0fd415db1d4c57c0f860eb4aea21f92559ff9902bae4d"},
- {file = "safetensors-0.4.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a844cdb5d7cbc22f5f16c7e2a0271170750763c4db08381b7f696dbd2c78a361"},
- {file = "safetensors-0.4.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:88887f69f7a00cf02b954cdc3034ffb383b2303bc0ab481d4716e2da51ddc10e"},
- {file = "safetensors-0.4.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ee463219d9ec6c2be1d331ab13a8e0cd50d2f32240a81d498266d77d07b7e71e"},
- {file = "safetensors-0.4.3-cp312-none-win32.whl", hash = "sha256:d0dd4a1db09db2dba0f94d15addc7e7cd3a7b0d393aa4c7518c39ae7374623c3"},
- {file = "safetensors-0.4.3-cp312-none-win_amd64.whl", hash = "sha256:d14d30c25897b2bf19b6fb5ff7e26cc40006ad53fd4a88244fdf26517d852dd7"},
- {file = "safetensors-0.4.3-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:d1456f814655b224d4bf6e7915c51ce74e389b413be791203092b7ff78c936dd"},
- {file = "safetensors-0.4.3-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:455d538aa1aae4a8b279344a08136d3f16334247907b18a5c3c7fa88ef0d3c46"},
- {file = "safetensors-0.4.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf476bca34e1340ee3294ef13e2c625833f83d096cfdf69a5342475602004f95"},
- {file = "safetensors-0.4.3-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:02ef3a24face643456020536591fbd3c717c5abaa2737ec428ccbbc86dffa7a4"},
- {file = "safetensors-0.4.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7de32d0d34b6623bb56ca278f90db081f85fb9c5d327e3c18fd23ac64f465768"},
- {file = "safetensors-0.4.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a0deb16a1d3ea90c244ceb42d2c6c276059616be21a19ac7101aa97da448faf"},
- {file = "safetensors-0.4.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c59d51f182c729f47e841510b70b967b0752039f79f1de23bcdd86462a9b09ee"},
- {file = "safetensors-0.4.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1f598b713cc1a4eb31d3b3203557ac308acf21c8f41104cdd74bf640c6e538e3"},
- {file = "safetensors-0.4.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:5757e4688f20df083e233b47de43845d1adb7e17b6cf7da5f8444416fc53828d"},
- {file = "safetensors-0.4.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:fe746d03ed8d193674a26105e4f0fe6c726f5bb602ffc695b409eaf02f04763d"},
- {file = "safetensors-0.4.3-cp37-none-win32.whl", hash = "sha256:0d5ffc6a80f715c30af253e0e288ad1cd97a3d0086c9c87995e5093ebc075e50"},
- {file = "safetensors-0.4.3-cp37-none-win_amd64.whl", hash = "sha256:a11c374eb63a9c16c5ed146457241182f310902bd2a9c18255781bb832b6748b"},
- {file = "safetensors-0.4.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:b1e31be7945f66be23f4ec1682bb47faa3df34cb89fc68527de6554d3c4258a4"},
- {file = "safetensors-0.4.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:03a4447c784917c9bf01d8f2ac5080bc15c41692202cd5f406afba16629e84d6"},
- {file = "safetensors-0.4.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d244bcafeb1bc06d47cfee71727e775bca88a8efda77a13e7306aae3813fa7e4"},
- {file = "safetensors-0.4.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53c4879b9c6bd7cd25d114ee0ef95420e2812e676314300624594940a8d6a91f"},
- {file = "safetensors-0.4.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:74707624b81f1b7f2b93f5619d4a9f00934d5948005a03f2c1845ffbfff42212"},
- {file = "safetensors-0.4.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0d52c958dc210265157573f81d34adf54e255bc2b59ded6218500c9b15a750eb"},
- {file = "safetensors-0.4.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f9568f380f513a60139971169c4a358b8731509cc19112369902eddb33faa4d"},
- {file = "safetensors-0.4.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0d9cd8e1560dfc514b6d7859247dc6a86ad2f83151a62c577428d5102d872721"},
- {file = "safetensors-0.4.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:89f9f17b0dacb913ed87d57afbc8aad85ea42c1085bd5de2f20d83d13e9fc4b2"},
- {file = "safetensors-0.4.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:1139eb436fd201c133d03c81209d39ac57e129f5e74e34bb9ab60f8d9b726270"},
- {file = "safetensors-0.4.3-cp38-none-win32.whl", hash = "sha256:d9c289f140a9ae4853fc2236a2ffc9a9f2d5eae0cb673167e0f1b8c18c0961ac"},
- {file = "safetensors-0.4.3-cp38-none-win_amd64.whl", hash = "sha256:622afd28968ef3e9786562d352659a37de4481a4070f4ebac883f98c5836563e"},
- {file = "safetensors-0.4.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:8651c7299cbd8b4161a36cd6a322fa07d39cd23535b144d02f1c1972d0c62f3c"},
- {file = "safetensors-0.4.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e375d975159ac534c7161269de24ddcd490df2157b55c1a6eeace6cbb56903f0"},
- {file = "safetensors-0.4.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:084fc436e317f83f7071fc6a62ca1c513b2103db325cd09952914b50f51cf78f"},
- {file = "safetensors-0.4.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:41a727a7f5e6ad9f1db6951adee21bbdadc632363d79dc434876369a17de6ad6"},
- {file = "safetensors-0.4.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e7dbbde64b6c534548696808a0e01276d28ea5773bc9a2dfb97a88cd3dffe3df"},
- {file = "safetensors-0.4.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bbae3b4b9d997971431c346edbfe6e41e98424a097860ee872721e176040a893"},
- {file = "safetensors-0.4.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01e4b22e3284cd866edeabe4f4d896229495da457229408d2e1e4810c5187121"},
- {file = "safetensors-0.4.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0dd37306546b58d3043eb044c8103a02792cc024b51d1dd16bd3dd1f334cb3ed"},
- {file = "safetensors-0.4.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d8815b5e1dac85fc534a97fd339e12404db557878c090f90442247e87c8aeaea"},
- {file = "safetensors-0.4.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e011cc162503c19f4b1fd63dfcddf73739c7a243a17dac09b78e57a00983ab35"},
- {file = "safetensors-0.4.3-cp39-none-win32.whl", hash = "sha256:01feb3089e5932d7e662eda77c3ecc389f97c0883c4a12b5cfdc32b589a811c3"},
- {file = "safetensors-0.4.3-cp39-none-win_amd64.whl", hash = "sha256:3f9cdca09052f585e62328c1c2923c70f46814715c795be65f0b93f57ec98a02"},
- {file = "safetensors-0.4.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:1b89381517891a7bb7d1405d828b2bf5d75528299f8231e9346b8eba092227f9"},
- {file = "safetensors-0.4.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:cd6fff9e56df398abc5866b19a32124815b656613c1c5ec0f9350906fd798aac"},
- {file = "safetensors-0.4.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:840caf38d86aa7014fe37ade5d0d84e23dcfbc798b8078015831996ecbc206a3"},
- {file = "safetensors-0.4.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9650713b2cfa9537a2baf7dd9fee458b24a0aaaa6cafcea8bdd5fb2b8efdc34"},
- {file = "safetensors-0.4.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e4119532cd10dba04b423e0f86aecb96cfa5a602238c0aa012f70c3a40c44b50"},
- {file = "safetensors-0.4.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:e066e8861eef6387b7c772344d1fe1f9a72800e04ee9a54239d460c400c72aab"},
- {file = "safetensors-0.4.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:90964917f5b0fa0fa07e9a051fbef100250c04d150b7026ccbf87a34a54012e0"},
- {file = "safetensors-0.4.3-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c41e1893d1206aa7054029681778d9a58b3529d4c807002c156d58426c225173"},
- {file = "safetensors-0.4.3-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ae7613a119a71a497d012ccc83775c308b9c1dab454806291427f84397d852fd"},
- {file = "safetensors-0.4.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f9bac020faba7f5dc481e881b14b6425265feabb5bfc552551d21189c0eddc3"},
- {file = "safetensors-0.4.3-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:420a98f593ff9930f5822560d14c395ccbc57342ddff3b463bc0b3d6b1951550"},
- {file = "safetensors-0.4.3-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:f5e6883af9a68c0028f70a4c19d5a6ab6238a379be36ad300a22318316c00cb0"},
- {file = "safetensors-0.4.3-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:cdd0a3b5da66e7f377474599814dbf5cbf135ff059cc73694de129b58a5e8a2c"},
- {file = "safetensors-0.4.3-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:9bfb92f82574d9e58401d79c70c716985dc049b635fef6eecbb024c79b2c46ad"},
- {file = "safetensors-0.4.3-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:3615a96dd2dcc30eb66d82bc76cda2565f4f7bfa89fcb0e31ba3cea8a1a9ecbb"},
- {file = "safetensors-0.4.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:868ad1b6fc41209ab6bd12f63923e8baeb1a086814cb2e81a65ed3d497e0cf8f"},
- {file = "safetensors-0.4.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7ffba80aa49bd09195145a7fd233a7781173b422eeb995096f2b30591639517"},
- {file = "safetensors-0.4.3-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c0acbe31340ab150423347e5b9cc595867d814244ac14218932a5cf1dd38eb39"},
- {file = "safetensors-0.4.3-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:19bbdf95de2cf64f25cd614c5236c8b06eb2cfa47cbf64311f4b5d80224623a3"},
- {file = "safetensors-0.4.3-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b852e47eb08475c2c1bd8131207b405793bfc20d6f45aff893d3baaad449ed14"},
- {file = "safetensors-0.4.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5d07cbca5b99babb692d76d8151bec46f461f8ad8daafbfd96b2fca40cadae65"},
- {file = "safetensors-0.4.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:1ab6527a20586d94291c96e00a668fa03f86189b8a9defa2cdd34a1a01acc7d5"},
- {file = "safetensors-0.4.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:02318f01e332cc23ffb4f6716e05a492c5f18b1d13e343c49265149396284a44"},
- {file = "safetensors-0.4.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec4b52ce9a396260eb9731eb6aea41a7320de22ed73a1042c2230af0212758ce"},
- {file = "safetensors-0.4.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:018b691383026a2436a22b648873ed11444a364324e7088b99cd2503dd828400"},
- {file = "safetensors-0.4.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:309b10dbcab63269ecbf0e2ca10ce59223bb756ca5d431ce9c9eeabd446569da"},
- {file = "safetensors-0.4.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b277482120df46e27a58082df06a15aebda4481e30a1c21eefd0921ae7e03f65"},
- {file = "safetensors-0.4.3.tar.gz", hash = "sha256:2f85fc50c4e07a21e95c24e07460fe6f7e2859d0ce88092838352b798ce711c2"},
+ {file = "safetensors-0.4.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2adb497ada13097f30e386e88c959c0fda855a5f6f98845710f5bb2c57e14f12"},
+ {file = "safetensors-0.4.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7db7fdc2d71fd1444d85ca3f3d682ba2df7d61a637dfc6d80793f439eae264ab"},
+ {file = "safetensors-0.4.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d4f0eed76b430f009fbefca1a0028ddb112891b03cb556d7440d5cd68eb89a9"},
+ {file = "safetensors-0.4.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:57d216fab0b5c432aabf7170883d7c11671622bde8bd1436c46d633163a703f6"},
+ {file = "safetensors-0.4.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7d9b76322e49c056bcc819f8bdca37a2daa5a6d42c07f30927b501088db03309"},
+ {file = "safetensors-0.4.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:32f0d1f6243e90ee43bc6ee3e8c30ac5b09ca63f5dd35dbc985a1fc5208c451a"},
+ {file = "safetensors-0.4.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:44d464bdc384874601a177375028012a5f177f1505279f9456fea84bbc575c7f"},
+ {file = "safetensors-0.4.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:63144e36209ad8e4e65384dbf2d52dd5b1866986079c00a72335402a38aacdc5"},
+ {file = "safetensors-0.4.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:051d5ecd490af7245258000304b812825974d5e56f14a3ff7e1b8b2ba6dc2ed4"},
+ {file = "safetensors-0.4.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:51bc8429d9376224cd3cf7e8ce4f208b4c930cd10e515b6ac6a72cbc3370f0d9"},
+ {file = "safetensors-0.4.4-cp310-none-win32.whl", hash = "sha256:fb7b54830cee8cf9923d969e2df87ce20e625b1af2fd194222ab902d3adcc29c"},
+ {file = "safetensors-0.4.4-cp310-none-win_amd64.whl", hash = "sha256:4b3e8aa8226d6560de8c2b9d5ff8555ea482599c670610758afdc97f3e021e9c"},
+ {file = "safetensors-0.4.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:bbaa31f2cb49013818bde319232ccd72da62ee40f7d2aa532083eda5664e85ff"},
+ {file = "safetensors-0.4.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9fdcb80f4e9fbb33b58e9bf95e7dbbedff505d1bcd1c05f7c7ce883632710006"},
+ {file = "safetensors-0.4.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55c14c20be247b8a1aeaf3ab4476265e3ca83096bb8e09bb1a7aa806088def4f"},
+ {file = "safetensors-0.4.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:949aaa1118660f992dbf0968487b3e3cfdad67f948658ab08c6b5762e90cc8b6"},
+ {file = "safetensors-0.4.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c11a4ab7debc456326a2bac67f35ee0ac792bcf812c7562a4a28559a5c795e27"},
+ {file = "safetensors-0.4.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c0cea44bba5c5601b297bc8307e4075535b95163402e4906b2e9b82788a2a6df"},
+ {file = "safetensors-0.4.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9d752c97f6bbe327352f76e5b86442d776abc789249fc5e72eacb49e6916482"},
+ {file = "safetensors-0.4.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:03f2bb92e61b055ef6cc22883ad1ae898010a95730fa988c60a23800eb742c2c"},
+ {file = "safetensors-0.4.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:87bf3f91a9328a941acc44eceffd4e1f5f89b030985b2966637e582157173b98"},
+ {file = "safetensors-0.4.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:20d218ec2b6899d29d6895419a58b6e44cc5ff8f0cc29fac8d236a8978ab702e"},
+ {file = "safetensors-0.4.4-cp311-none-win32.whl", hash = "sha256:8079486118919f600c603536e2490ca37b3dbd3280e3ad6eaacfe6264605ac8a"},
+ {file = "safetensors-0.4.4-cp311-none-win_amd64.whl", hash = "sha256:2f8c2eb0615e2e64ee27d478c7c13f51e5329d7972d9e15528d3e4cfc4a08f0d"},
+ {file = "safetensors-0.4.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:baec5675944b4a47749c93c01c73d826ef7d42d36ba8d0dba36336fa80c76426"},
+ {file = "safetensors-0.4.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f15117b96866401825f3e94543145028a2947d19974429246ce59403f49e77c6"},
+ {file = "safetensors-0.4.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a13a9caea485df164c51be4eb0c87f97f790b7c3213d635eba2314d959fe929"},
+ {file = "safetensors-0.4.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6b54bc4ca5f9b9bba8cd4fb91c24b2446a86b5ae7f8975cf3b7a277353c3127c"},
+ {file = "safetensors-0.4.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:08332c22e03b651c8eb7bf5fc2de90044f3672f43403b3d9ac7e7e0f4f76495e"},
+ {file = "safetensors-0.4.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bb62841e839ee992c37bb75e75891c7f4904e772db3691c59daaca5b4ab960e1"},
+ {file = "safetensors-0.4.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e5b927acc5f2f59547270b0309a46d983edc44be64e1ca27a7fcb0474d6cd67"},
+ {file = "safetensors-0.4.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2a69c71b1ae98a8021a09a0b43363b0143b0ce74e7c0e83cacba691b62655fb8"},
+ {file = "safetensors-0.4.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:23654ad162c02a5636f0cd520a0310902c4421aab1d91a0b667722a4937cc445"},
+ {file = "safetensors-0.4.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0677c109d949cf53756859160b955b2e75b0eefe952189c184d7be30ecf7e858"},
+ {file = "safetensors-0.4.4-cp312-none-win32.whl", hash = "sha256:a51d0ddd4deb8871c6de15a772ef40b3dbd26a3c0451bb9e66bc76fc5a784e5b"},
+ {file = "safetensors-0.4.4-cp312-none-win_amd64.whl", hash = "sha256:2d065059e75a798bc1933c293b68d04d79b586bb7f8c921e0ca1e82759d0dbb1"},
+ {file = "safetensors-0.4.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:9d625692578dd40a112df30c02a1adf068027566abd8e6a74893bb13d441c150"},
+ {file = "safetensors-0.4.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7cabcf39c81e5b988d0adefdaea2eb9b4fd9bd62d5ed6559988c62f36bfa9a89"},
+ {file = "safetensors-0.4.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8359bef65f49d51476e9811d59c015f0ddae618ee0e44144f5595278c9f8268c"},
+ {file = "safetensors-0.4.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1a32c662e7df9226fd850f054a3ead0e4213a96a70b5ce37b2d26ba27004e013"},
+ {file = "safetensors-0.4.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c329a4dcc395364a1c0d2d1574d725fe81a840783dda64c31c5a60fc7d41472c"},
+ {file = "safetensors-0.4.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:239ee093b1db877c9f8fe2d71331a97f3b9c7c0d3ab9f09c4851004a11f44b65"},
+ {file = "safetensors-0.4.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd574145d930cf9405a64f9923600879a5ce51d9f315443a5f706374841327b6"},
+ {file = "safetensors-0.4.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f6784eed29f9e036acb0b7769d9e78a0dc2c72c2d8ba7903005350d817e287a4"},
+ {file = "safetensors-0.4.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:65a4a6072436bf0a4825b1c295d248cc17e5f4651e60ee62427a5bcaa8622a7a"},
+ {file = "safetensors-0.4.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:df81e3407630de060ae8313da49509c3caa33b1a9415562284eaf3d0c7705f9f"},
+ {file = "safetensors-0.4.4-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:e4a0f374200e8443d9746e947ebb346c40f83a3970e75a685ade0adbba5c48d9"},
+ {file = "safetensors-0.4.4-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:181fb5f3dee78dae7fd7ec57d02e58f7936498d587c6b7c1c8049ef448c8d285"},
+ {file = "safetensors-0.4.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb4ac1d8f6b65ec84ddfacd275079e89d9df7c92f95675ba96c4f790a64df6e"},
+ {file = "safetensors-0.4.4-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:76897944cd9239e8a70955679b531b9a0619f76e25476e57ed373322d9c2075d"},
+ {file = "safetensors-0.4.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2a9e9d1a27e51a0f69e761a3d581c3af46729ec1c988fa1f839e04743026ae35"},
+ {file = "safetensors-0.4.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:005ef9fc0f47cb9821c40793eb029f712e97278dae84de91cb2b4809b856685d"},
+ {file = "safetensors-0.4.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26987dac3752688c696c77c3576f951dbbdb8c57f0957a41fb6f933cf84c0b62"},
+ {file = "safetensors-0.4.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c05270b290acd8d249739f40d272a64dd597d5a4b90f27d830e538bc2549303c"},
+ {file = "safetensors-0.4.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:068d3a33711fc4d93659c825a04480ff5a3854e1d78632cdc8f37fee917e8a60"},
+ {file = "safetensors-0.4.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:063421ef08ca1021feea8b46951251b90ae91f899234dd78297cbe7c1db73b99"},
+ {file = "safetensors-0.4.4-cp37-none-win32.whl", hash = "sha256:d52f5d0615ea83fd853d4e1d8acf93cc2e0223ad4568ba1e1f6ca72e94ea7b9d"},
+ {file = "safetensors-0.4.4-cp37-none-win_amd64.whl", hash = "sha256:88a5ac3280232d4ed8e994cbc03b46a1807ce0aa123867b40c4a41f226c61f94"},
+ {file = "safetensors-0.4.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:3467ab511bfe3360967d7dc53b49f272d59309e57a067dd2405b4d35e7dcf9dc"},
+ {file = "safetensors-0.4.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2ab4c96d922e53670ce25fbb9b63d5ea972e244de4fa1dd97b590d9fd66aacef"},
+ {file = "safetensors-0.4.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87df18fce4440477c3ef1fd7ae17c704a69a74a77e705a12be135ee0651a0c2d"},
+ {file = "safetensors-0.4.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0e5fe345b2bc7d88587149ac11def1f629d2671c4c34f5df38aed0ba59dc37f8"},
+ {file = "safetensors-0.4.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9f1a3e01dce3cd54060791e7e24588417c98b941baa5974700eeb0b8eb65b0a0"},
+ {file = "safetensors-0.4.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c6bf35e9a8998d8339fd9a05ac4ce465a4d2a2956cc0d837b67c4642ed9e947"},
+ {file = "safetensors-0.4.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:166c0c52f6488b8538b2a9f3fbc6aad61a7261e170698779b371e81b45f0440d"},
+ {file = "safetensors-0.4.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:87e9903b8668a16ef02c08ba4ebc91e57a49c481e9b5866e31d798632805014b"},
+ {file = "safetensors-0.4.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a9c421153aa23c323bd8483d4155b4eee82c9a50ac11cccd83539104a8279c64"},
+ {file = "safetensors-0.4.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:a4b8617499b2371c7353302c5116a7e0a3a12da66389ce53140e607d3bf7b3d3"},
+ {file = "safetensors-0.4.4-cp38-none-win32.whl", hash = "sha256:c6280f5aeafa1731f0a3709463ab33d8e0624321593951aefada5472f0b313fd"},
+ {file = "safetensors-0.4.4-cp38-none-win_amd64.whl", hash = "sha256:6ceed6247fc2d33b2a7b7d25d8a0fe645b68798856e0bc7a9800c5fd945eb80f"},
+ {file = "safetensors-0.4.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:5cf6c6f6193797372adf50c91d0171743d16299491c75acad8650107dffa9269"},
+ {file = "safetensors-0.4.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:419010156b914a3e5da4e4adf992bee050924d0fe423c4b329e523e2c14c3547"},
+ {file = "safetensors-0.4.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88f6fd5a5c1302ce79993cc5feeadcc795a70f953c762544d01fb02b2db4ea33"},
+ {file = "safetensors-0.4.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d468cffb82d90789696d5b4d8b6ab8843052cba58a15296691a7a3df55143cd2"},
+ {file = "safetensors-0.4.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9353c2af2dd467333d4850a16edb66855e795561cd170685178f706c80d2c71e"},
+ {file = "safetensors-0.4.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:83c155b4a33368d9b9c2543e78f2452090fb030c52401ca608ef16fa58c98353"},
+ {file = "safetensors-0.4.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9850754c434e636ce3dc586f534bb23bcbd78940c304775bee9005bf610e98f1"},
+ {file = "safetensors-0.4.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:275f500b4d26f67b6ec05629a4600645231bd75e4ed42087a7c1801bff04f4b3"},
+ {file = "safetensors-0.4.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5c2308de665b7130cd0e40a2329278226e4cf083f7400c51ca7e19ccfb3886f3"},
+ {file = "safetensors-0.4.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e06a9ebc8656e030ccfe44634f2a541b4b1801cd52e390a53ad8bacbd65f8518"},
+ {file = "safetensors-0.4.4-cp39-none-win32.whl", hash = "sha256:ef73df487b7c14b477016947c92708c2d929e1dee2bacdd6fff5a82ed4539537"},
+ {file = "safetensors-0.4.4-cp39-none-win_amd64.whl", hash = "sha256:83d054818a8d1198d8bd8bc3ea2aac112a2c19def2bf73758321976788706398"},
+ {file = "safetensors-0.4.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:1d1f34c71371f0e034004a0b583284b45d233dd0b5f64a9125e16b8a01d15067"},
+ {file = "safetensors-0.4.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1a8043a33d58bc9b30dfac90f75712134ca34733ec3d8267b1bd682afe7194f5"},
+ {file = "safetensors-0.4.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8db8f0c59c84792c12661f8efa85de160f80efe16b87a9d5de91b93f9e0bce3c"},
+ {file = "safetensors-0.4.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cfc1fc38e37630dd12d519bdec9dcd4b345aec9930bb9ce0ed04461f49e58b52"},
+ {file = "safetensors-0.4.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e5c9d86d9b13b18aafa88303e2cd21e677f5da2a14c828d2c460fe513af2e9a5"},
+ {file = "safetensors-0.4.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:43251d7f29a59120a26f5a0d9583b9e112999e500afabcfdcb91606d3c5c89e3"},
+ {file = "safetensors-0.4.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:2c42e9b277513b81cf507e6121c7b432b3235f980cac04f39f435b7902857f91"},
+ {file = "safetensors-0.4.4-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3daacc9a4e3f428a84dd56bf31f20b768eb0b204af891ed68e1f06db9edf546f"},
+ {file = "safetensors-0.4.4-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:218bbb9b883596715fc9997bb42470bf9f21bb832c3b34c2bf744d6fa8f2bbba"},
+ {file = "safetensors-0.4.4-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7bd5efc26b39f7fc82d4ab1d86a7f0644c8e34f3699c33f85bfa9a717a030e1b"},
+ {file = "safetensors-0.4.4-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:56ad9776b65d8743f86698a1973292c966cf3abff627efc44ed60e66cc538ddd"},
+ {file = "safetensors-0.4.4-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:30f23e6253c5f43a809dea02dc28a9f5fa747735dc819f10c073fe1b605e97d4"},
+ {file = "safetensors-0.4.4-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:5512078d00263de6cb04e9d26c9ae17611098f52357fea856213e38dc462f81f"},
+ {file = "safetensors-0.4.4-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b96c3d9266439d17f35fc2173111d93afc1162f168e95aed122c1ca517b1f8f1"},
+ {file = "safetensors-0.4.4-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:08d464aa72a9a13826946b4fb9094bb4b16554bbea2e069e20bd903289b6ced9"},
+ {file = "safetensors-0.4.4-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:210160816d5a36cf41f48f38473b6f70d7bcb4b0527bedf0889cc0b4c3bb07db"},
+ {file = "safetensors-0.4.4-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb276a53717f2bcfb6df0bcf284d8a12069002508d4c1ca715799226024ccd45"},
+ {file = "safetensors-0.4.4-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a2c28c6487f17d8db0089e8b2cdc13de859366b94cc6cdc50e1b0a4147b56551"},
+ {file = "safetensors-0.4.4-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:7915f0c60e4e6e65d90f136d85dd3b429ae9191c36b380e626064694563dbd9f"},
+ {file = "safetensors-0.4.4-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:00eea99ae422fbfa0b46065acbc58b46bfafadfcec179d4b4a32d5c45006af6c"},
+ {file = "safetensors-0.4.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:bb1ed4fcb0b3c2f3ea2c5767434622fe5d660e5752f21ac2e8d737b1e5e480bb"},
+ {file = "safetensors-0.4.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:73fc9a0a4343188bdb421783e600bfaf81d0793cd4cce6bafb3c2ed567a74cd5"},
+ {file = "safetensors-0.4.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c37e6b714200824c73ca6eaf007382de76f39466a46e97558b8dc4cf643cfbf"},
+ {file = "safetensors-0.4.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f75698c5c5c542417ac4956acfc420f7d4a2396adca63a015fd66641ea751759"},
+ {file = "safetensors-0.4.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ca1a209157f242eb183e209040097118472e169f2e069bfbd40c303e24866543"},
+ {file = "safetensors-0.4.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:177f2b60a058f92a3cec7a1786c9106c29eca8987ecdfb79ee88126e5f47fa31"},
+ {file = "safetensors-0.4.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ee9622e84fe6e4cd4f020e5fda70d6206feff3157731df7151d457fdae18e541"},
+ {file = "safetensors-0.4.4.tar.gz", hash = "sha256:5fe3e9b705250d0172ed4e100a811543108653fb2b66b9e702a088ad03772a07"},
]
[package.extras]
@@ -7610,13 +7683,13 @@ tornado = ["tornado (>=5)"]
[[package]]
name = "setuptools"
-version = "71.1.0"
+version = "72.1.0"
description = "Easily download, build, install, upgrade, and uninstall Python packages"
optional = false
python-versions = ">=3.8"
files = [
- {file = "setuptools-71.1.0-py3-none-any.whl", hash = "sha256:33874fdc59b3188304b2e7c80d9029097ea31627180896fb549c578ceb8a0855"},
- {file = "setuptools-71.1.0.tar.gz", hash = "sha256:032d42ee9fb536e33087fb66cac5f840eb9391ed05637b3f2a76a7c8fb477936"},
+ {file = "setuptools-72.1.0-py3-none-any.whl", hash = "sha256:5a03e1860cf56bb6ef48ce186b0e557fdba433237481a9a625176c2831be15d1"},
+ {file = "setuptools-72.1.0.tar.gz", hash = "sha256:8d243eff56d095e5817f796ede6ae32941278f542e0f941867cc05ae52b162ec"},
]
[package.extras]
@@ -7760,60 +7833,60 @@ files = [
[[package]]
name = "sqlalchemy"
-version = "2.0.31"
+version = "2.0.32"
description = "Database Abstraction Library"
optional = false
python-versions = ">=3.7"
files = [
- {file = "SQLAlchemy-2.0.31-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f2a213c1b699d3f5768a7272de720387ae0122f1becf0901ed6eaa1abd1baf6c"},
- {file = "SQLAlchemy-2.0.31-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9fea3d0884e82d1e33226935dac990b967bef21315cbcc894605db3441347443"},
- {file = "SQLAlchemy-2.0.31-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3ad7f221d8a69d32d197e5968d798217a4feebe30144986af71ada8c548e9fa"},
- {file = "SQLAlchemy-2.0.31-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f2bee229715b6366f86a95d497c347c22ddffa2c7c96143b59a2aa5cc9eebbc"},
- {file = "SQLAlchemy-2.0.31-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cd5b94d4819c0c89280b7c6109c7b788a576084bf0a480ae17c227b0bc41e109"},
- {file = "SQLAlchemy-2.0.31-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:750900a471d39a7eeba57580b11983030517a1f512c2cb287d5ad0fcf3aebd58"},
- {file = "SQLAlchemy-2.0.31-cp310-cp310-win32.whl", hash = "sha256:7bd112be780928c7f493c1a192cd8c5fc2a2a7b52b790bc5a84203fb4381c6be"},
- {file = "SQLAlchemy-2.0.31-cp310-cp310-win_amd64.whl", hash = "sha256:5a48ac4d359f058474fadc2115f78a5cdac9988d4f99eae44917f36aa1476327"},
- {file = "SQLAlchemy-2.0.31-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f68470edd70c3ac3b6cd5c2a22a8daf18415203ca1b036aaeb9b0fb6f54e8298"},
- {file = "SQLAlchemy-2.0.31-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2e2c38c2a4c5c634fe6c3c58a789712719fa1bf9b9d6ff5ebfce9a9e5b89c1ca"},
- {file = "SQLAlchemy-2.0.31-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd15026f77420eb2b324dcb93551ad9c5f22fab2c150c286ef1dc1160f110203"},
- {file = "SQLAlchemy-2.0.31-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2196208432deebdfe3b22185d46b08f00ac9d7b01284e168c212919891289396"},
- {file = "SQLAlchemy-2.0.31-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:352b2770097f41bff6029b280c0e03b217c2dcaddc40726f8f53ed58d8a85da4"},
- {file = "SQLAlchemy-2.0.31-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:56d51ae825d20d604583f82c9527d285e9e6d14f9a5516463d9705dab20c3740"},
- {file = "SQLAlchemy-2.0.31-cp311-cp311-win32.whl", hash = "sha256:6e2622844551945db81c26a02f27d94145b561f9d4b0c39ce7bfd2fda5776dac"},
- {file = "SQLAlchemy-2.0.31-cp311-cp311-win_amd64.whl", hash = "sha256:ccaf1b0c90435b6e430f5dd30a5aede4764942a695552eb3a4ab74ed63c5b8d3"},
- {file = "SQLAlchemy-2.0.31-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3b74570d99126992d4b0f91fb87c586a574a5872651185de8297c6f90055ae42"},
- {file = "SQLAlchemy-2.0.31-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6f77c4f042ad493cb8595e2f503c7a4fe44cd7bd59c7582fd6d78d7e7b8ec52c"},
- {file = "SQLAlchemy-2.0.31-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cd1591329333daf94467e699e11015d9c944f44c94d2091f4ac493ced0119449"},
- {file = "SQLAlchemy-2.0.31-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:74afabeeff415e35525bf7a4ecdab015f00e06456166a2eba7590e49f8db940e"},
- {file = "SQLAlchemy-2.0.31-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b9c01990d9015df2c6f818aa8f4297d42ee71c9502026bb074e713d496e26b67"},
- {file = "SQLAlchemy-2.0.31-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:66f63278db425838b3c2b1c596654b31939427016ba030e951b292e32b99553e"},
- {file = "SQLAlchemy-2.0.31-cp312-cp312-win32.whl", hash = "sha256:0b0f658414ee4e4b8cbcd4a9bb0fd743c5eeb81fc858ca517217a8013d282c96"},
- {file = "SQLAlchemy-2.0.31-cp312-cp312-win_amd64.whl", hash = "sha256:fa4b1af3e619b5b0b435e333f3967612db06351217c58bfb50cee5f003db2a5a"},
- {file = "SQLAlchemy-2.0.31-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:f43e93057cf52a227eda401251c72b6fbe4756f35fa6bfebb5d73b86881e59b0"},
- {file = "SQLAlchemy-2.0.31-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d337bf94052856d1b330d5fcad44582a30c532a2463776e1651bd3294ee7e58b"},
- {file = "SQLAlchemy-2.0.31-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c06fb43a51ccdff3b4006aafee9fcf15f63f23c580675f7734245ceb6b6a9e05"},
- {file = "SQLAlchemy-2.0.31-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:b6e22630e89f0e8c12332b2b4c282cb01cf4da0d26795b7eae16702a608e7ca1"},
- {file = "SQLAlchemy-2.0.31-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:79a40771363c5e9f3a77f0e28b3302801db08040928146e6808b5b7a40749c88"},
- {file = "SQLAlchemy-2.0.31-cp37-cp37m-win32.whl", hash = "sha256:501ff052229cb79dd4c49c402f6cb03b5a40ae4771efc8bb2bfac9f6c3d3508f"},
- {file = "SQLAlchemy-2.0.31-cp37-cp37m-win_amd64.whl", hash = "sha256:597fec37c382a5442ffd471f66ce12d07d91b281fd474289356b1a0041bdf31d"},
- {file = "SQLAlchemy-2.0.31-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:dc6d69f8829712a4fd799d2ac8d79bdeff651c2301b081fd5d3fe697bd5b4ab9"},
- {file = "SQLAlchemy-2.0.31-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:23b9fbb2f5dd9e630db70fbe47d963c7779e9c81830869bd7d137c2dc1ad05fb"},
- {file = "SQLAlchemy-2.0.31-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2a21c97efcbb9f255d5c12a96ae14da873233597dfd00a3a0c4ce5b3e5e79704"},
- {file = "SQLAlchemy-2.0.31-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26a6a9837589c42b16693cf7bf836f5d42218f44d198f9343dd71d3164ceeeac"},
- {file = "SQLAlchemy-2.0.31-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:dc251477eae03c20fae8db9c1c23ea2ebc47331bcd73927cdcaecd02af98d3c3"},
- {file = "SQLAlchemy-2.0.31-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:2fd17e3bb8058359fa61248c52c7b09a97cf3c820e54207a50af529876451808"},
- {file = "SQLAlchemy-2.0.31-cp38-cp38-win32.whl", hash = "sha256:c76c81c52e1e08f12f4b6a07af2b96b9b15ea67ccdd40ae17019f1c373faa227"},
- {file = "SQLAlchemy-2.0.31-cp38-cp38-win_amd64.whl", hash = "sha256:4b600e9a212ed59355813becbcf282cfda5c93678e15c25a0ef896b354423238"},
- {file = "SQLAlchemy-2.0.31-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b6cf796d9fcc9b37011d3f9936189b3c8074a02a4ed0c0fbbc126772c31a6d4"},
- {file = "SQLAlchemy-2.0.31-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:78fe11dbe37d92667c2c6e74379f75746dc947ee505555a0197cfba9a6d4f1a4"},
- {file = "SQLAlchemy-2.0.31-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2fc47dc6185a83c8100b37acda27658fe4dbd33b7d5e7324111f6521008ab4fe"},
- {file = "SQLAlchemy-2.0.31-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a41514c1a779e2aa9a19f67aaadeb5cbddf0b2b508843fcd7bafdf4c6864005"},
- {file = "SQLAlchemy-2.0.31-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:afb6dde6c11ea4525318e279cd93c8734b795ac8bb5dda0eedd9ebaca7fa23f1"},
- {file = "SQLAlchemy-2.0.31-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:3f9faef422cfbb8fd53716cd14ba95e2ef655400235c3dfad1b5f467ba179c8c"},
- {file = "SQLAlchemy-2.0.31-cp39-cp39-win32.whl", hash = "sha256:fc6b14e8602f59c6ba893980bea96571dd0ed83d8ebb9c4479d9ed5425d562e9"},
- {file = "SQLAlchemy-2.0.31-cp39-cp39-win_amd64.whl", hash = "sha256:3cb8a66b167b033ec72c3812ffc8441d4e9f5f78f5e31e54dcd4c90a4ca5bebc"},
- {file = "SQLAlchemy-2.0.31-py3-none-any.whl", hash = "sha256:69f3e3c08867a8e4856e92d7afb618b95cdee18e0bc1647b77599722c9a28911"},
- {file = "SQLAlchemy-2.0.31.tar.gz", hash = "sha256:b607489dd4a54de56984a0c7656247504bd5523d9d0ba799aef59d4add009484"},
+ {file = "SQLAlchemy-2.0.32-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0c9045ecc2e4db59bfc97b20516dfdf8e41d910ac6fb667ebd3a79ea54084619"},
+ {file = "SQLAlchemy-2.0.32-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1467940318e4a860afd546ef61fefb98a14d935cd6817ed07a228c7f7c62f389"},
+ {file = "SQLAlchemy-2.0.32-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5954463675cb15db8d4b521f3566a017c8789222b8316b1e6934c811018ee08b"},
+ {file = "SQLAlchemy-2.0.32-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:167e7497035c303ae50651b351c28dc22a40bb98fbdb8468cdc971821b1ae533"},
+ {file = "SQLAlchemy-2.0.32-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b27dfb676ac02529fb6e343b3a482303f16e6bc3a4d868b73935b8792edb52d0"},
+ {file = "SQLAlchemy-2.0.32-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:bf2360a5e0f7bd75fa80431bf8ebcfb920c9f885e7956c7efde89031695cafb8"},
+ {file = "SQLAlchemy-2.0.32-cp310-cp310-win32.whl", hash = "sha256:306fe44e754a91cd9d600a6b070c1f2fadbb4a1a257b8781ccf33c7067fd3e4d"},
+ {file = "SQLAlchemy-2.0.32-cp310-cp310-win_amd64.whl", hash = "sha256:99db65e6f3ab42e06c318f15c98f59a436f1c78179e6a6f40f529c8cc7100b22"},
+ {file = "SQLAlchemy-2.0.32-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:21b053be28a8a414f2ddd401f1be8361e41032d2ef5884b2f31d31cb723e559f"},
+ {file = "SQLAlchemy-2.0.32-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b178e875a7a25b5938b53b006598ee7645172fccafe1c291a706e93f48499ff5"},
+ {file = "SQLAlchemy-2.0.32-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:723a40ee2cc7ea653645bd4cf024326dea2076673fc9d3d33f20f6c81db83e1d"},
+ {file = "SQLAlchemy-2.0.32-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:295ff8689544f7ee7e819529633d058bd458c1fd7f7e3eebd0f9268ebc56c2a0"},
+ {file = "SQLAlchemy-2.0.32-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:49496b68cd190a147118af585173ee624114dfb2e0297558c460ad7495f9dfe2"},
+ {file = "SQLAlchemy-2.0.32-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:acd9b73c5c15f0ec5ce18128b1fe9157ddd0044abc373e6ecd5ba376a7e5d961"},
+ {file = "SQLAlchemy-2.0.32-cp311-cp311-win32.whl", hash = "sha256:9365a3da32dabd3e69e06b972b1ffb0c89668994c7e8e75ce21d3e5e69ddef28"},
+ {file = "SQLAlchemy-2.0.32-cp311-cp311-win_amd64.whl", hash = "sha256:8bd63d051f4f313b102a2af1cbc8b80f061bf78f3d5bd0843ff70b5859e27924"},
+ {file = "SQLAlchemy-2.0.32-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6bab3db192a0c35e3c9d1560eb8332463e29e5507dbd822e29a0a3c48c0a8d92"},
+ {file = "SQLAlchemy-2.0.32-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:19d98f4f58b13900d8dec4ed09dd09ef292208ee44cc9c2fe01c1f0a2fe440e9"},
+ {file = "SQLAlchemy-2.0.32-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cd33c61513cb1b7371fd40cf221256456d26a56284e7d19d1f0b9f1eb7dd7e8"},
+ {file = "SQLAlchemy-2.0.32-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d6ba0497c1d066dd004e0f02a92426ca2df20fac08728d03f67f6960271feec"},
+ {file = "SQLAlchemy-2.0.32-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2b6be53e4fde0065524f1a0a7929b10e9280987b320716c1509478b712a7688c"},
+ {file = "SQLAlchemy-2.0.32-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:916a798f62f410c0b80b63683c8061f5ebe237b0f4ad778739304253353bc1cb"},
+ {file = "SQLAlchemy-2.0.32-cp312-cp312-win32.whl", hash = "sha256:31983018b74908ebc6c996a16ad3690301a23befb643093fcfe85efd292e384d"},
+ {file = "SQLAlchemy-2.0.32-cp312-cp312-win_amd64.whl", hash = "sha256:4363ed245a6231f2e2957cccdda3c776265a75851f4753c60f3004b90e69bfeb"},
+ {file = "SQLAlchemy-2.0.32-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b8afd5b26570bf41c35c0121801479958b4446751a3971fb9a480c1afd85558e"},
+ {file = "SQLAlchemy-2.0.32-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c750987fc876813f27b60d619b987b057eb4896b81117f73bb8d9918c14f1cad"},
+ {file = "SQLAlchemy-2.0.32-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ada0102afff4890f651ed91120c1120065663506b760da4e7823913ebd3258be"},
+ {file = "SQLAlchemy-2.0.32-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:78c03d0f8a5ab4f3034c0e8482cfcc415a3ec6193491cfa1c643ed707d476f16"},
+ {file = "SQLAlchemy-2.0.32-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:3bd1cae7519283ff525e64645ebd7a3e0283f3c038f461ecc1c7b040a0c932a1"},
+ {file = "SQLAlchemy-2.0.32-cp37-cp37m-win32.whl", hash = "sha256:01438ebcdc566d58c93af0171c74ec28efe6a29184b773e378a385e6215389da"},
+ {file = "SQLAlchemy-2.0.32-cp37-cp37m-win_amd64.whl", hash = "sha256:4979dc80fbbc9d2ef569e71e0896990bc94df2b9fdbd878290bd129b65ab579c"},
+ {file = "SQLAlchemy-2.0.32-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c742be912f57586ac43af38b3848f7688863a403dfb220193a882ea60e1ec3a"},
+ {file = "SQLAlchemy-2.0.32-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:62e23d0ac103bcf1c5555b6c88c114089587bc64d048fef5bbdb58dfd26f96da"},
+ {file = "SQLAlchemy-2.0.32-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:251f0d1108aab8ea7b9aadbd07fb47fb8e3a5838dde34aa95a3349876b5a1f1d"},
+ {file = "SQLAlchemy-2.0.32-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ef18a84e5116340e38eca3e7f9eeaaef62738891422e7c2a0b80feab165905f"},
+ {file = "SQLAlchemy-2.0.32-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:3eb6a97a1d39976f360b10ff208c73afb6a4de86dd2a6212ddf65c4a6a2347d5"},
+ {file = "SQLAlchemy-2.0.32-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0c1c9b673d21477cec17ab10bc4decb1322843ba35b481585facd88203754fc5"},
+ {file = "SQLAlchemy-2.0.32-cp38-cp38-win32.whl", hash = "sha256:c41a2b9ca80ee555decc605bd3c4520cc6fef9abde8fd66b1cf65126a6922d65"},
+ {file = "SQLAlchemy-2.0.32-cp38-cp38-win_amd64.whl", hash = "sha256:8a37e4d265033c897892279e8adf505c8b6b4075f2b40d77afb31f7185cd6ecd"},
+ {file = "SQLAlchemy-2.0.32-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:52fec964fba2ef46476312a03ec8c425956b05c20220a1a03703537824b5e8e1"},
+ {file = "SQLAlchemy-2.0.32-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:328429aecaba2aee3d71e11f2477c14eec5990fb6d0e884107935f7fb6001632"},
+ {file = "SQLAlchemy-2.0.32-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85a01b5599e790e76ac3fe3aa2f26e1feba56270023d6afd5550ed63c68552b3"},
+ {file = "SQLAlchemy-2.0.32-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aaf04784797dcdf4c0aa952c8d234fa01974c4729db55c45732520ce12dd95b4"},
+ {file = "SQLAlchemy-2.0.32-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:4488120becf9b71b3ac718f4138269a6be99a42fe023ec457896ba4f80749525"},
+ {file = "SQLAlchemy-2.0.32-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:14e09e083a5796d513918a66f3d6aedbc131e39e80875afe81d98a03312889e6"},
+ {file = "SQLAlchemy-2.0.32-cp39-cp39-win32.whl", hash = "sha256:0d322cc9c9b2154ba7e82f7bf25ecc7c36fbe2d82e2933b3642fc095a52cfc78"},
+ {file = "SQLAlchemy-2.0.32-cp39-cp39-win_amd64.whl", hash = "sha256:7dd8583df2f98dea28b5cd53a1beac963f4f9d087888d75f22fcc93a07cf8d84"},
+ {file = "SQLAlchemy-2.0.32-py3-none-any.whl", hash = "sha256:e567a8793a692451f706b363ccf3c45e056b67d90ead58c3bc9471af5d212202"},
+ {file = "SQLAlchemy-2.0.32.tar.gz", hash = "sha256:c1b88cc8b02b6a5f0efb0345a03672d4c897dc7d92585176f88c67346f565ea8"},
]
[package.dependencies]
@@ -7879,17 +7952,20 @@ full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.7
[[package]]
name = "sympy"
-version = "1.12"
+version = "1.13.1"
description = "Computer algebra system (CAS) in Python"
optional = false
python-versions = ">=3.8"
files = [
- {file = "sympy-1.12-py3-none-any.whl", hash = "sha256:c3588cd4295d0c0f603d0f2ae780587e64e2efeedb3521e46b9bb1d08d184fa5"},
- {file = "sympy-1.12.tar.gz", hash = "sha256:ebf595c8dac3e0fdc4152c51878b498396ec7f30e7a914d6071e674d49420fb8"},
+ {file = "sympy-1.13.1-py3-none-any.whl", hash = "sha256:db36cdc64bf61b9b24578b6f7bab1ecdd2452cf008f34faa33776680c26d66f8"},
+ {file = "sympy-1.13.1.tar.gz", hash = "sha256:9cebf7e04ff162015ce31c9c6c9144daa34a93bd082f54fd8f12deca4f47515f"},
]
[package.dependencies]
-mpmath = ">=0.19"
+mpmath = ">=1.1.0,<1.4"
+
+[package.extras]
+dev = ["hypothesis (>=6.70.0)", "pytest (>=7.1.0)"]
[[package]]
name = "tabulate"
@@ -7922,13 +7998,13 @@ requests = "*"
[[package]]
name = "tenacity"
-version = "8.3.0"
+version = "9.0.0"
description = "Retry code until it succeeds"
optional = false
python-versions = ">=3.8"
files = [
- {file = "tenacity-8.3.0-py3-none-any.whl", hash = "sha256:3649f6443dbc0d9b01b9d8020a9c4ec7a1ff5f6f3c6c8a036ef371f573fe9185"},
- {file = "tenacity-8.3.0.tar.gz", hash = "sha256:953d4e6ad24357bceffbc9707bc74349aca9d245f68eb65419cf0c249a1949a2"},
+ {file = "tenacity-9.0.0-py3-none-any.whl", hash = "sha256:93de0c98785b27fcf659856aa9f54bfbd399e29969b0621bc7f762bd441b4539"},
+ {file = "tenacity-9.0.0.tar.gz", hash = "sha256:807f37ca97d62aa361264d497b0e31e92b8027044942bfa756160d908320d73b"},
]
[package.extras]
@@ -7937,13 +8013,13 @@ test = ["pytest", "tornado (>=4.5)", "typeguard"]
[[package]]
name = "tencentcloud-sdk-python-common"
-version = "3.0.1196"
+version = "3.0.1206"
description = "Tencent Cloud Common SDK for Python"
optional = false
python-versions = "*"
files = [
- {file = "tencentcloud-sdk-python-common-3.0.1196.tar.gz", hash = "sha256:a8acd14f7480987ff0fd1d961ad934b2b7533ab1937d7e3adb74d95dc49954bd"},
- {file = "tencentcloud_sdk_python_common-3.0.1196-py2.py3-none-any.whl", hash = "sha256:5ed438bc3e2818ca8e84b3896aaa2746798fba981bd94b27528eb36efa5b4a30"},
+ {file = "tencentcloud-sdk-python-common-3.0.1206.tar.gz", hash = "sha256:e32745e6d46b94b2c2c33cd68c7e70bff3d63e8e5e5d314bb0b41616521c90f2"},
+ {file = "tencentcloud_sdk_python_common-3.0.1206-py2.py3-none-any.whl", hash = "sha256:2100697933d62135b093bae43eee0f8862b45ca0597da72779e304c9b392ac96"},
]
[package.dependencies]
@@ -7951,17 +8027,17 @@ requests = ">=2.16.0"
[[package]]
name = "tencentcloud-sdk-python-hunyuan"
-version = "3.0.1196"
+version = "3.0.1206"
description = "Tencent Cloud Hunyuan SDK for Python"
optional = false
python-versions = "*"
files = [
- {file = "tencentcloud-sdk-python-hunyuan-3.0.1196.tar.gz", hash = "sha256:ced26497ae5f1b8fcc6cbd12238109274251e82fa1cfedfd6700df776306a36c"},
- {file = "tencentcloud_sdk_python_hunyuan-3.0.1196-py2.py3-none-any.whl", hash = "sha256:d18a19cffeaf4ff8a60670dc2bdb644f3d7ae6a51c30d21b50ded24a9c542248"},
+ {file = "tencentcloud-sdk-python-hunyuan-3.0.1206.tar.gz", hash = "sha256:2c37f2f50e54d23905d91d7a511a217317d944c701127daae548b7275cc32968"},
+ {file = "tencentcloud_sdk_python_hunyuan-3.0.1206-py2.py3-none-any.whl", hash = "sha256:c650315bb5863f28d410fa1062122550d8015600947d04d95e2bff55d0590acc"},
]
[package.dependencies]
-tencentcloud-sdk-python-common = "3.0.1196"
+tencentcloud-sdk-python-common = "3.0.1206"
[[package]]
name = "threadpoolctl"
@@ -8225,13 +8301,13 @@ files = [
[[package]]
name = "tqdm"
-version = "4.66.4"
+version = "4.66.5"
description = "Fast, Extensible Progress Meter"
optional = false
python-versions = ">=3.7"
files = [
- {file = "tqdm-4.66.4-py3-none-any.whl", hash = "sha256:b75ca56b413b030bc3f00af51fd2c1a1a5eac6a0c1cca83cbb37a5c52abce644"},
- {file = "tqdm-4.66.4.tar.gz", hash = "sha256:e4d936c9de8727928f3be6079590e97d9abfe8d39a590be678eb5919ffc186bb"},
+ {file = "tqdm-4.66.5-py3-none-any.whl", hash = "sha256:90279a3770753eafc9194a0364852159802111925aa30eb3f9d85b0e805ac7cd"},
+ {file = "tqdm-4.66.5.tar.gz", hash = "sha256:e1020aef2e5096702d8a025ac7d16b1577279c9d63f8375b63083e9a5f0fcbad"},
]
[package.dependencies]
@@ -8613,13 +8689,13 @@ zstd = ["zstandard (>=0.18.0)"]
[[package]]
name = "uvicorn"
-version = "0.30.3"
+version = "0.30.5"
description = "The lightning-fast ASGI server."
optional = false
python-versions = ">=3.8"
files = [
- {file = "uvicorn-0.30.3-py3-none-any.whl", hash = "sha256:94a3608da0e530cea8f69683aa4126364ac18e3826b6630d1a65f4638aade503"},
- {file = "uvicorn-0.30.3.tar.gz", hash = "sha256:0d114d0831ff1adbf231d358cbf42f17333413042552a624ea6a9b4c33dcfd81"},
+ {file = "uvicorn-0.30.5-py3-none-any.whl", hash = "sha256:b2d86de274726e9878188fa07576c9ceeff90a839e2b6e25c917fe05f5a6c835"},
+ {file = "uvicorn-0.30.5.tar.gz", hash = "sha256:ac6fdbd4425c5fd17a9fe39daf4d4d075da6fdc80f653e5894cdc2fd98752bee"},
]
[package.dependencies]
@@ -9344,47 +9420,45 @@ test = ["zope.testrunner"]
[[package]]
name = "zope-interface"
-version = "6.4.post2"
+version = "7.0.1"
description = "Interfaces for Python"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "zope.interface-6.4.post2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2eccd5bef45883802848f821d940367c1d0ad588de71e5cabe3813175444202c"},
- {file = "zope.interface-6.4.post2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:762e616199f6319bb98e7f4f27d254c84c5fb1c25c908c2a9d0f92b92fb27530"},
- {file = "zope.interface-6.4.post2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ef8356f16b1a83609f7a992a6e33d792bb5eff2370712c9eaae0d02e1924341"},
- {file = "zope.interface-6.4.post2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e4fa5d34d7973e6b0efa46fe4405090f3b406f64b6290facbb19dcbf642ad6b"},
- {file = "zope.interface-6.4.post2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d22fce0b0f5715cdac082e35a9e735a1752dc8585f005d045abb1a7c20e197f9"},
- {file = "zope.interface-6.4.post2-cp310-cp310-win_amd64.whl", hash = "sha256:97e615eab34bd8477c3f34197a17ce08c648d38467489359cb9eb7394f1083f7"},
- {file = "zope.interface-6.4.post2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:599f3b07bde2627e163ce484d5497a54a0a8437779362395c6b25e68c6590ede"},
- {file = "zope.interface-6.4.post2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:136cacdde1a2c5e5bc3d0b2a1beed733f97e2dad8c2ad3c2e17116f6590a3827"},
- {file = "zope.interface-6.4.post2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:47937cf2e7ed4e0e37f7851c76edeb8543ec9b0eae149b36ecd26176ff1ca874"},
- {file = "zope.interface-6.4.post2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f0a6be264afb094975b5ef55c911379d6989caa87c4e558814ec4f5125cfa2e"},
- {file = "zope.interface-6.4.post2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47654177e675bafdf4e4738ce58cdc5c6d6ee2157ac0a78a3fa460942b9d64a8"},
- {file = "zope.interface-6.4.post2-cp311-cp311-win_amd64.whl", hash = "sha256:e2fb8e8158306567a3a9a41670c1ff99d0567d7fc96fa93b7abf8b519a46b250"},
- {file = "zope.interface-6.4.post2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b912750b13d76af8aac45ddf4679535def304b2a48a07989ec736508d0bbfbde"},
- {file = "zope.interface-6.4.post2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4ac46298e0143d91e4644a27a769d1388d5d89e82ee0cf37bf2b0b001b9712a4"},
- {file = "zope.interface-6.4.post2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:86a94af4a88110ed4bb8961f5ac72edf782958e665d5bfceaab6bf388420a78b"},
- {file = "zope.interface-6.4.post2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:73f9752cf3596771c7726f7eea5b9e634ad47c6d863043589a1c3bb31325c7eb"},
- {file = "zope.interface-6.4.post2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00b5c3e9744dcdc9e84c24ed6646d5cf0cf66551347b310b3ffd70f056535854"},
- {file = "zope.interface-6.4.post2-cp312-cp312-win_amd64.whl", hash = "sha256:551db2fe892fcbefb38f6f81ffa62de11090c8119fd4e66a60f3adff70751ec7"},
- {file = "zope.interface-6.4.post2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e96ac6b3169940a8cd57b4f2b8edcad8f5213b60efcd197d59fbe52f0accd66e"},
- {file = "zope.interface-6.4.post2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cebff2fe5dc82cb22122e4e1225e00a4a506b1a16fafa911142ee124febf2c9e"},
- {file = "zope.interface-6.4.post2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33ee982237cffaf946db365c3a6ebaa37855d8e3ca5800f6f48890209c1cfefc"},
- {file = "zope.interface-6.4.post2-cp37-cp37m-macosx_11_0_x86_64.whl", hash = "sha256:fbf649bc77510ef2521cf797700b96167bb77838c40780da7ea3edd8b78044d1"},
- {file = "zope.interface-6.4.post2-cp37-cp37m-win_amd64.whl", hash = "sha256:4c0b208a5d6c81434bdfa0f06d9b667e5de15af84d8cae5723c3a33ba6611b82"},
- {file = "zope.interface-6.4.post2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d3fe667935e9562407c2511570dca14604a654988a13d8725667e95161d92e9b"},
- {file = "zope.interface-6.4.post2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a96e6d4074db29b152222c34d7eec2e2db2f92638d2b2b2c704f9e8db3ae0edc"},
- {file = "zope.interface-6.4.post2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:866a0f583be79f0def667a5d2c60b7b4cc68f0c0a470f227e1122691b443c934"},
- {file = "zope.interface-6.4.post2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5fe919027f29b12f7a2562ba0daf3e045cb388f844e022552a5674fcdf5d21f1"},
- {file = "zope.interface-6.4.post2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e0343a6e06d94f6b6ac52fbc75269b41dd3c57066541a6c76517f69fe67cb43"},
- {file = "zope.interface-6.4.post2-cp38-cp38-win_amd64.whl", hash = "sha256:dabb70a6e3d9c22df50e08dc55b14ca2a99da95a2d941954255ac76fd6982bc5"},
- {file = "zope.interface-6.4.post2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:706efc19f9679a1b425d6fa2b4bc770d976d0984335eaea0869bd32f627591d2"},
- {file = "zope.interface-6.4.post2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3d136e5b8821073e1a09dde3eb076ea9988e7010c54ffe4d39701adf0c303438"},
- {file = "zope.interface-6.4.post2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1730c93a38b5a18d24549bc81613223962a19d457cfda9bdc66e542f475a36f4"},
- {file = "zope.interface-6.4.post2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bc2676312cc3468a25aac001ec727168994ea3b69b48914944a44c6a0b251e79"},
- {file = "zope.interface-6.4.post2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a62fd6cd518693568e23e02f41816adedfca637f26716837681c90b36af3671"},
- {file = "zope.interface-6.4.post2-cp39-cp39-win_amd64.whl", hash = "sha256:d3f7e001328bd6466b3414215f66dde3c7c13d8025a9c160a75d7b2687090d15"},
- {file = "zope.interface-6.4.post2.tar.gz", hash = "sha256:1c207e6f6dfd5749a26f5a5fd966602d6b824ec00d2df84a7e9a924e8933654e"},
+ {file = "zope.interface-7.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ec4e87e6fdc511a535254daa122c20e11959ce043b4e3425494b237692a34f1c"},
+ {file = "zope.interface-7.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:51d5713e8e38f2d3ec26e0dfdca398ed0c20abda2eb49ffc15a15a23eb8e5f6d"},
+ {file = "zope.interface-7.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea8d51e5eb29e57d34744369cd08267637aa5a0fefc9b5d33775ab7ff2ebf2e3"},
+ {file = "zope.interface-7.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:55bbcc74dc0c7ab489c315c28b61d7a1d03cf938cc99cc58092eb065f120c3a5"},
+ {file = "zope.interface-7.0.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10ebac566dd0cec66f942dc759d46a994a2b3ba7179420f0e2130f88f8a5f400"},
+ {file = "zope.interface-7.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:7039e624bcb820f77cc2ff3d1adcce531932990eee16121077eb51d9c76b6c14"},
+ {file = "zope.interface-7.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:03bd5c0db82237bbc47833a8b25f1cc090646e212f86b601903d79d7e6b37031"},
+ {file = "zope.interface-7.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3f52050c6a10d4a039ec6f2c58e5b3ade5cc570d16cf9d102711e6b8413c90e6"},
+ {file = "zope.interface-7.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:af0b33f04677b57843d529b9257a475d2865403300b48c67654c40abac2f9f24"},
+ {file = "zope.interface-7.0.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:696c2a381fc7876b3056711717dba5eddd07c2c9e5ccd50da54029a1293b6e43"},
+ {file = "zope.interface-7.0.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f89a420cf5a6f2aa7849dd59e1ff0e477f562d97cf8d6a1ee03461e1eec39887"},
+ {file = "zope.interface-7.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:b59deb0ddc7b431e41d720c00f99d68b52cb9bd1d5605a085dc18f502fe9c47f"},
+ {file = "zope.interface-7.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:52f5253cca1b35eaeefa51abd366b87f48f8714097c99b131ba61f3fdbbb58e7"},
+ {file = "zope.interface-7.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:88d108d004e0df25224de77ce349a7e73494ea2cb194031f7c9687e68a88ec9b"},
+ {file = "zope.interface-7.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c203d82069ba31e1f3bc7ba530b2461ec86366cd4bfc9b95ec6ce58b1b559c34"},
+ {file = "zope.interface-7.0.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3f3495462bc0438b76536a0e10d765b168ae636092082531b88340dc40dcd118"},
+ {file = "zope.interface-7.0.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:192b7a792e3145ed880ff6b1a206fdb783697cfdb4915083bfca7065ec845e60"},
+ {file = "zope.interface-7.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:400d06c9ec8dbcc96f56e79376297e7be07a315605c9a2208720da263d44d76f"},
+ {file = "zope.interface-7.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c1dff87b30fd150c61367d0e2cdc49bb55f8b9fd2a303560bbc24b951573ae1"},
+ {file = "zope.interface-7.0.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f749ca804648d00eda62fe1098f229b082dfca930d8bad8386e572a6eafa7525"},
+ {file = "zope.interface-7.0.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ec212037becf6d2f705b7ed4538d56980b1e7bba237df0d8995cbbed29961dc"},
+ {file = "zope.interface-7.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d33cb526efdc235a2531433fc1287fcb80d807d5b401f9b801b78bf22df560dd"},
+ {file = "zope.interface-7.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b419f2144e1762ab845f20316f1df36b15431f2622ebae8a6d5f7e8e712b413c"},
+ {file = "zope.interface-7.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03f1452d5d1f279184d5bdb663a3dc39902d9320eceb63276240791e849054b6"},
+ {file = "zope.interface-7.0.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ba4b3638d014918b918aa90a9c8370bd74a03abf8fcf9deb353b3a461a59a84"},
+ {file = "zope.interface-7.0.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc0615351221926a36a0fbcb2520fb52e0b23e8c22a43754d9cb8f21358c33c0"},
+ {file = "zope.interface-7.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:ce6cbb852fb8f2f9bb7b9cdca44e2e37bce783b5f4c167ff82cb5f5128163c8f"},
+ {file = "zope.interface-7.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5566fd9271c89ad03d81b0831c37d46ae5e2ed211122c998637130159a120cf1"},
+ {file = "zope.interface-7.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:da0cef4d7e3f19c3bd1d71658d6900321af0492fee36ec01b550a10924cffb9c"},
+ {file = "zope.interface-7.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f32ca483e6ade23c7caaee9d5ee5d550cf4146e9b68d2fb6c68bac183aa41c37"},
+ {file = "zope.interface-7.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:da21e7eec49252df34d426c2ee9cf0361c923026d37c24728b0fa4cc0599fd03"},
+ {file = "zope.interface-7.0.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a8195b99e650e6f329ce4e5eb22d448bdfef0406404080812bc96e2a05674cb"},
+ {file = "zope.interface-7.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:19c829d52e921b9fe0b2c0c6a8f9a2508c49678ee1be598f87d143335b6a35dc"},
+ {file = "zope.interface-7.0.1.tar.gz", hash = "sha256:f0f5fda7cbf890371a59ab1d06512da4f2c89a6ea194e595808123c863c38eff"},
]
[package.dependencies]
@@ -9510,4 +9584,4 @@ cffi = ["cffi (>=1.11)"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.10,<3.13"
-content-hash = "d40cddaa8cd9c7ee7f8bbca06c8dd844facf9b2b618131dd85a41da5e0d47125"
+content-hash = "05dfa6b9bce9ed8ac21caf58eff1596f146080ab2ab6987924b189be673c22cf"
diff --git a/api/pyproject.toml b/api/pyproject.toml
index 15f9aab64099ec..3e107f5e9b0bcc 100644
--- a/api/pyproject.toml
+++ b/api/pyproject.toml
@@ -69,7 +69,18 @@ ignore = [
]
[tool.ruff.format]
-quote-style = "single"
+exclude = [
+ "core/**/*.py",
+ "controllers/**/*.py",
+ "models/**/*.py",
+ "utils/**/*.py",
+ "migrations/**/*",
+ "services/**/*.py",
+ "tasks/**/*.py",
+ "tests/**/*.py",
+ "libs/**/*.py",
+ "configs/**/*.py",
+]
[tool.pytest_env]
OPENAI_API_KEY = "sk-IamNotARealKeyJustForMockTestKawaiiiiiiiiii"
@@ -93,6 +104,8 @@ CODE_MAX_STRING_LENGTH = "80000"
CODE_EXECUTION_ENDPOINT = "http://127.0.0.1:8194"
CODE_EXECUTION_API_KEY = "dify-sandbox"
FIRECRAWL_API_KEY = "fc-"
+TEI_EMBEDDING_SERVER_URL = "http://a.abc.com:11451"
+TEI_RERANK_SERVER_URL = "http://a.abc.com:11451"
[tool.poetry]
name = "dify-api"
@@ -108,7 +121,7 @@ authlib = "1.3.1"
azure-identity = "1.16.1"
azure-storage-blob = "12.13.0"
beautifulsoup4 = "4.12.2"
-boto3 = "1.34.136"
+boto3 = "1.34.148"
bs4 = "~0.0.1"
cachetools = "~5.3.0"
celery = "~5.3.6"
@@ -179,6 +192,7 @@ zhipuai = "1.0.7"
rank-bm25 = "~0.2.2"
openpyxl = "^3.1.5"
kaleido = "0.2.1"
+elasticsearch = "8.14.0"
############################################################
# Tool dependencies required by tool implementations
@@ -240,5 +254,5 @@ pytest-mock = "~3.14.0"
optional = true
[tool.poetry.group.lint.dependencies]
-ruff = "~0.5.1"
+ruff = "~0.5.7"
dotenv-linter = "~0.5.0"
diff --git a/api/schedule/clean_embedding_cache_task.py b/api/schedule/clean_embedding_cache_task.py
index ccc1062266a02f..67d070682867bb 100644
--- a/api/schedule/clean_embedding_cache_task.py
+++ b/api/schedule/clean_embedding_cache_task.py
@@ -11,27 +11,32 @@
from models.dataset import Embedding
-@app.celery.task(queue='dataset')
+@app.celery.task(queue="dataset")
def clean_embedding_cache_task():
- click.echo(click.style('Start clean embedding cache.', fg='green'))
+ click.echo(click.style("Start clean embedding cache.", fg="green"))
clean_days = int(dify_config.CLEAN_DAY_SETTING)
start_at = time.perf_counter()
thirty_days_ago = datetime.datetime.now() - datetime.timedelta(days=clean_days)
while True:
try:
- embedding_ids = db.session.query(Embedding.id).filter(Embedding.created_at < thirty_days_ago) \
- .order_by(Embedding.created_at.desc()).limit(100).all()
+ embedding_ids = (
+ db.session.query(Embedding.id)
+ .filter(Embedding.created_at < thirty_days_ago)
+ .order_by(Embedding.created_at.desc())
+ .limit(100)
+ .all()
+ )
embedding_ids = [embedding_id[0] for embedding_id in embedding_ids]
except NotFound:
break
if embedding_ids:
for embedding_id in embedding_ids:
- db.session.execute(text(
- "DELETE FROM embeddings WHERE id = :embedding_id"
- ), {'embedding_id': embedding_id})
+ db.session.execute(
+ text("DELETE FROM embeddings WHERE id = :embedding_id"), {"embedding_id": embedding_id}
+ )
db.session.commit()
else:
break
end_at = time.perf_counter()
- click.echo(click.style('Cleaned embedding cache from db success latency: {}'.format(end_at - start_at), fg='green'))
+ click.echo(click.style("Cleaned embedding cache from db success latency: {}".format(end_at - start_at), fg="green"))
diff --git a/api/schedule/clean_unused_datasets_task.py b/api/schedule/clean_unused_datasets_task.py
index b2b2f82b786f5e..3d799bfd4ef732 100644
--- a/api/schedule/clean_unused_datasets_task.py
+++ b/api/schedule/clean_unused_datasets_task.py
@@ -12,9 +12,9 @@
from models.dataset import Dataset, DatasetQuery, Document
-@app.celery.task(queue='dataset')
+@app.celery.task(queue="dataset")
def clean_unused_datasets_task():
- click.echo(click.style('Start clean unused datasets indexes.', fg='green'))
+ click.echo(click.style("Start clean unused datasets indexes.", fg="green"))
clean_days = dify_config.CLEAN_DAY_SETTING
start_at = time.perf_counter()
thirty_days_ago = datetime.datetime.now() - datetime.timedelta(days=clean_days)
@@ -22,40 +22,44 @@ def clean_unused_datasets_task():
while True:
try:
# Subquery for counting new documents
- document_subquery_new = db.session.query(
- Document.dataset_id,
- func.count(Document.id).label('document_count')
- ).filter(
- Document.indexing_status == 'completed',
- Document.enabled == True,
- Document.archived == False,
- Document.updated_at > thirty_days_ago
- ).group_by(Document.dataset_id).subquery()
+ document_subquery_new = (
+ db.session.query(Document.dataset_id, func.count(Document.id).label("document_count"))
+ .filter(
+ Document.indexing_status == "completed",
+ Document.enabled == True,
+ Document.archived == False,
+ Document.updated_at > thirty_days_ago,
+ )
+ .group_by(Document.dataset_id)
+ .subquery()
+ )
# Subquery for counting old documents
- document_subquery_old = db.session.query(
- Document.dataset_id,
- func.count(Document.id).label('document_count')
- ).filter(
- Document.indexing_status == 'completed',
- Document.enabled == True,
- Document.archived == False,
- Document.updated_at < thirty_days_ago
- ).group_by(Document.dataset_id).subquery()
+ document_subquery_old = (
+ db.session.query(Document.dataset_id, func.count(Document.id).label("document_count"))
+ .filter(
+ Document.indexing_status == "completed",
+ Document.enabled == True,
+ Document.archived == False,
+ Document.updated_at < thirty_days_ago,
+ )
+ .group_by(Document.dataset_id)
+ .subquery()
+ )
# Main query with join and filter
- datasets = (db.session.query(Dataset)
- .outerjoin(
- document_subquery_new, Dataset.id == document_subquery_new.c.dataset_id
- ).outerjoin(
- document_subquery_old, Dataset.id == document_subquery_old.c.dataset_id
- ).filter(
- Dataset.created_at < thirty_days_ago,
- func.coalesce(document_subquery_new.c.document_count, 0) == 0,
- func.coalesce(document_subquery_old.c.document_count, 0) > 0
- ).order_by(
- Dataset.created_at.desc()
- ).paginate(page=page, per_page=50))
+ datasets = (
+ db.session.query(Dataset)
+ .outerjoin(document_subquery_new, Dataset.id == document_subquery_new.c.dataset_id)
+ .outerjoin(document_subquery_old, Dataset.id == document_subquery_old.c.dataset_id)
+ .filter(
+ Dataset.created_at < thirty_days_ago,
+ func.coalesce(document_subquery_new.c.document_count, 0) == 0,
+ func.coalesce(document_subquery_old.c.document_count, 0) > 0,
+ )
+ .order_by(Dataset.created_at.desc())
+ .paginate(page=page, per_page=50)
+ )
except NotFound:
break
@@ -63,10 +67,11 @@ def clean_unused_datasets_task():
break
page += 1
for dataset in datasets:
- dataset_query = db.session.query(DatasetQuery).filter(
- DatasetQuery.created_at > thirty_days_ago,
- DatasetQuery.dataset_id == dataset.id
- ).all()
+ dataset_query = (
+ db.session.query(DatasetQuery)
+ .filter(DatasetQuery.created_at > thirty_days_ago, DatasetQuery.dataset_id == dataset.id)
+ .all()
+ )
if not dataset_query or len(dataset_query) == 0:
try:
# remove index
@@ -74,17 +79,14 @@ def clean_unused_datasets_task():
index_processor.clean(dataset, None)
# update document
- update_params = {
- Document.enabled: False
- }
+ update_params = {Document.enabled: False}
Document.query.filter_by(dataset_id=dataset.id).update(update_params)
db.session.commit()
- click.echo(click.style('Cleaned unused dataset {} from db success!'.format(dataset.id),
- fg='green'))
+ click.echo(click.style("Cleaned unused dataset {} from db success!".format(dataset.id), fg="green"))
except Exception as e:
click.echo(
- click.style('clean dataset index error: {} {}'.format(e.__class__.__name__, str(e)),
- fg='red'))
+ click.style("clean dataset index error: {} {}".format(e.__class__.__name__, str(e)), fg="red")
+ )
end_at = time.perf_counter()
- click.echo(click.style('Cleaned unused dataset from db success latency: {}'.format(end_at - start_at), fg='green'))
+ click.echo(click.style("Cleaned unused dataset from db success latency: {}".format(end_at - start_at), fg="green"))
diff --git a/api/services/app_dsl_service.py b/api/services/app_dsl_service.py
index 3764166333255d..bfb160b3e476d9 100644
--- a/api/services/app_dsl_service.py
+++ b/api/services/app_dsl_service.py
@@ -13,9 +13,9 @@
logger = logging.getLogger(__name__)
-current_dsl_version = "0.1.0"
+current_dsl_version = "0.1.1"
dsl_to_dify_version_mapping: dict[str, str] = {
- "0.1.0": "0.6.0", # dsl version -> from dify version
+ "0.1.1": "0.6.0", # dsl version -> from dify version
}
@@ -176,7 +176,7 @@ def export_dsl(cls, app_model: App, include_secret:bool = False) -> str:
else:
cls._append_model_config_export_data(export_data, app_model)
- return yaml.dump(export_data)
+ return yaml.dump(export_data, allow_unicode=True)
@classmethod
def _check_or_fix_dsl(cls, import_data: dict) -> dict:
@@ -238,6 +238,8 @@ def _import_and_create_new_workflow_based_app(cls,
# init draft workflow
environment_variables_list = workflow_data.get('environment_variables') or []
environment_variables = [factory.build_variable_from_mapping(obj) for obj in environment_variables_list]
+ conversation_variables_list = workflow_data.get('conversation_variables') or []
+ conversation_variables = [factory.build_variable_from_mapping(obj) for obj in conversation_variables_list]
workflow_service = WorkflowService()
draft_workflow = workflow_service.sync_draft_workflow(
app_model=app,
@@ -246,6 +248,7 @@ def _import_and_create_new_workflow_based_app(cls,
unique_hash=None,
account=account,
environment_variables=environment_variables,
+ conversation_variables=conversation_variables,
)
workflow_service.publish_workflow(
app_model=app,
@@ -282,6 +285,8 @@ def _import_and_overwrite_workflow_based_app(cls,
# sync draft workflow
environment_variables_list = workflow_data.get('environment_variables') or []
environment_variables = [factory.build_variable_from_mapping(obj) for obj in environment_variables_list]
+ conversation_variables_list = workflow_data.get('conversation_variables') or []
+ conversation_variables = [factory.build_variable_from_mapping(obj) for obj in conversation_variables_list]
draft_workflow = workflow_service.sync_draft_workflow(
app_model=app_model,
graph=workflow_data.get('graph', {}),
@@ -289,6 +294,7 @@ def _import_and_overwrite_workflow_based_app(cls,
unique_hash=unique_hash,
account=account,
environment_variables=environment_variables,
+ conversation_variables=conversation_variables,
)
return draft_workflow
diff --git a/api/services/hit_testing_service.py b/api/services/hit_testing_service.py
index 69274dff09a9a0..de5f6994b0ebca 100644
--- a/api/services/hit_testing_service.py
+++ b/api/services/hit_testing_service.py
@@ -42,11 +42,12 @@ def retrieve(cls, dataset: Dataset, query: str, account: Account, retrieval_mode
dataset_id=dataset.id,
query=cls.escape_query_for_search(query),
top_k=retrieval_model.get('top_k', 2),
- score_threshold=retrieval_model['score_threshold']
+ score_threshold=retrieval_model.get('score_threshold', .0)
if retrieval_model['score_threshold_enabled'] else None,
- reranking_model=retrieval_model['reranking_model']
+ reranking_model=retrieval_model.get('reranking_model', None)
if retrieval_model['reranking_enable'] else None,
- reranking_mode=retrieval_model.get('reranking_mode', None),
+ reranking_mode=retrieval_model.get('reranking_mode')
+ if retrieval_model.get('reranking_mode') else 'reranking_model',
weights=retrieval_model.get('weights', None),
)
diff --git a/api/services/message_service.py b/api/services/message_service.py
index e310d70d5314e7..491a914c776387 100644
--- a/api/services/message_service.py
+++ b/api/services/message_service.py
@@ -7,7 +7,8 @@
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_manager import ModelManager
from core.model_runtime.entities.model_entities import ModelType
-from core.ops.ops_trace_manager import TraceQueueManager, TraceTask, TraceTaskName
+from core.ops.entities.trace_entity import TraceTaskName
+from core.ops.ops_trace_manager import TraceQueueManager, TraceTask
from core.ops.utils import measure_time
from extensions.ext_database import db
from libs.infinite_scroll_pagination import InfiniteScrollPagination
diff --git a/api/services/model_load_balancing_service.py b/api/services/model_load_balancing_service.py
index 09838399961c7f..80eb72140d19b5 100644
--- a/api/services/model_load_balancing_service.py
+++ b/api/services/model_load_balancing_service.py
@@ -4,6 +4,7 @@
from json import JSONDecodeError
from typing import Optional
+from constants import HIDDEN_VALUE
from core.entities.provider_configuration import ProviderConfiguration
from core.helper import encrypter
from core.helper.model_provider_cache import ProviderCredentialsCache, ProviderCredentialsCacheType
@@ -511,7 +512,7 @@ def _custom_credentials_validate(self, tenant_id: str,
for key, value in credentials.items():
if key in provider_credential_secret_variables:
# if send [__HIDDEN__] in secret input, it will be same as original value
- if value == '[__HIDDEN__]' and key in original_credentials:
+ if value == HIDDEN_VALUE and key in original_credentials:
credentials[key] = encrypter.decrypt_token(tenant_id, original_credentials[key])
if validate:
diff --git a/api/services/workflow/workflow_converter.py b/api/services/workflow/workflow_converter.py
index 06b129be691010..f993608293cc8c 100644
--- a/api/services/workflow/workflow_converter.py
+++ b/api/services/workflow/workflow_converter.py
@@ -6,7 +6,6 @@
DatasetRetrieveConfigEntity,
EasyUIBasedAppConfig,
ExternalDataVariableEntity,
- FileExtraConfig,
ModelConfigEntity,
PromptTemplateEntity,
VariableEntity,
@@ -14,6 +13,7 @@
from core.app.apps.agent_chat.app_config_manager import AgentChatAppConfigManager
from core.app.apps.chat.app_config_manager import ChatAppConfigManager
from core.app.apps.completion.app_config_manager import CompletionAppConfigManager
+from core.file.file_obj import FileExtraConfig
from core.helper import encrypter
from core.model_runtime.entities.llm_entities import LLMMode
from core.model_runtime.utils.encoders import jsonable_encoder
diff --git a/api/services/workflow_app_service.py b/api/services/workflow_app_service.py
index 047678837509e2..c4d3d2763189a4 100644
--- a/api/services/workflow_app_service.py
+++ b/api/services/workflow_app_service.py
@@ -1,3 +1,5 @@
+import uuid
+
from flask_sqlalchemy.pagination import Pagination
from sqlalchemy import and_, or_
@@ -25,20 +27,26 @@ def get_paginate_workflow_app_logs(self, app_model: App, args: dict) -> Paginati
)
status = WorkflowRunStatus.value_of(args.get('status')) if args.get('status') else None
- if args['keyword'] or status:
+ keyword = args['keyword']
+ if keyword or status:
query = query.join(
WorkflowRun, WorkflowRun.id == WorkflowAppLog.workflow_run_id
)
- if args['keyword']:
- keyword_val = f"%{args['keyword'][:30]}%"
+ if keyword:
+ keyword_like_val = f"%{args['keyword'][:30]}%"
keyword_conditions = [
- WorkflowRun.inputs.ilike(keyword_val),
- WorkflowRun.outputs.ilike(keyword_val),
+ WorkflowRun.inputs.ilike(keyword_like_val),
+ WorkflowRun.outputs.ilike(keyword_like_val),
# filter keyword by end user session id if created by end user role
- and_(WorkflowRun.created_by_role == 'end_user', EndUser.session_id.ilike(keyword_val))
+ and_(WorkflowRun.created_by_role == 'end_user', EndUser.session_id.ilike(keyword_like_val))
]
+ # filter keyword by workflow run id
+ keyword_uuid = self._safe_parse_uuid(keyword)
+ if keyword_uuid:
+ keyword_conditions.append(WorkflowRun.id == keyword_uuid)
+
query = query.outerjoin(
EndUser,
and_(WorkflowRun.created_by == EndUser.id, WorkflowRun.created_by_role == CreatedByRole.END_USER.value)
@@ -60,3 +68,14 @@ def get_paginate_workflow_app_logs(self, app_model: App, args: dict) -> Paginati
)
return pagination
+
+ @staticmethod
+ def _safe_parse_uuid(value: str):
+ # fast check
+ if len(value) < 32:
+ return None
+
+ try:
+ return uuid.UUID(value)
+ except ValueError:
+ return None
diff --git a/api/services/workflow_service.py b/api/services/workflow_service.py
index d868255f96e419..2defb4cd6a7088 100644
--- a/api/services/workflow_service.py
+++ b/api/services/workflow_service.py
@@ -72,6 +72,7 @@ def sync_draft_workflow(
unique_hash: Optional[str],
account: Account,
environment_variables: Sequence[Variable],
+ conversation_variables: Sequence[Variable],
) -> Workflow:
"""
Sync draft workflow
@@ -99,7 +100,8 @@ def sync_draft_workflow(
graph=json.dumps(graph),
features=json.dumps(features),
created_by=account.id,
- environment_variables=environment_variables
+ environment_variables=environment_variables,
+ conversation_variables=conversation_variables,
)
db.session.add(workflow)
# update draft workflow if found
@@ -109,6 +111,7 @@ def sync_draft_workflow(
workflow.updated_by = account.id
workflow.updated_at = datetime.now(timezone.utc).replace(tzinfo=None)
workflow.environment_variables = environment_variables
+ workflow.conversation_variables = conversation_variables
# commit db session changes
db.session.commit()
@@ -145,7 +148,8 @@ def publish_workflow(self, app_model: App,
graph=draft_workflow.graph,
features=draft_workflow.features,
created_by=account.id,
- environment_variables=draft_workflow.environment_variables
+ environment_variables=draft_workflow.environment_variables,
+ conversation_variables=draft_workflow.conversation_variables,
)
# commit db session changes
@@ -319,3 +323,25 @@ def validate_features_structure(self, app_model: App, features: dict) -> dict:
)
else:
raise ValueError(f"Invalid app mode: {app_model.mode}")
+
+ @classmethod
+ def get_elapsed_time(cls, workflow_run_id: str) -> float:
+ """
+ Get elapsed time
+ """
+ elapsed_time = 0.0
+
+ # fetch workflow node execution by workflow_run_id
+ workflow_nodes = (
+ db.session.query(WorkflowNodeExecution)
+ .filter(WorkflowNodeExecution.workflow_run_id == workflow_run_id)
+ .order_by(WorkflowNodeExecution.created_at.asc())
+ .all()
+ )
+ if not workflow_nodes:
+ return elapsed_time
+
+ for node in workflow_nodes:
+ elapsed_time += node.elapsed_time
+
+ return elapsed_time
diff --git a/api/tasks/deal_dataset_vector_index_task.py b/api/tasks/deal_dataset_vector_index_task.py
index c1b0e7f1a4cf91..ce93e111e54aa4 100644
--- a/api/tasks/deal_dataset_vector_index_task.py
+++ b/api/tasks/deal_dataset_vector_index_task.py
@@ -42,31 +42,42 @@ def deal_dataset_vector_index_task(dataset_id: str, action: str):
).all()
if dataset_documents:
- documents = []
- for dataset_document in dataset_documents:
- # delete from vector index
- segments = db.session.query(DocumentSegment).filter(
- DocumentSegment.document_id == dataset_document.id,
- DocumentSegment.enabled == True
- ) .order_by(DocumentSegment.position.asc()).all()
- for segment in segments:
- document = Document(
- page_content=segment.content,
- metadata={
- "doc_id": segment.index_node_id,
- "doc_hash": segment.index_node_hash,
- "document_id": segment.document_id,
- "dataset_id": segment.dataset_id,
- }
- )
+ dataset_documents_ids = [doc.id for doc in dataset_documents]
+ db.session.query(DatasetDocument).filter(DatasetDocument.id.in_(dataset_documents_ids)) \
+ .update({"indexing_status": "indexing"}, synchronize_session=False)
+ db.session.commit()
- documents.append(document)
+ for dataset_document in dataset_documents:
+ try:
+ # add from vector index
+ segments = db.session.query(DocumentSegment).filter(
+ DocumentSegment.document_id == dataset_document.id,
+ DocumentSegment.enabled == True
+ ) .order_by(DocumentSegment.position.asc()).all()
+ if segments:
+ documents = []
+ for segment in segments:
+ document = Document(
+ page_content=segment.content,
+ metadata={
+ "doc_id": segment.index_node_id,
+ "doc_hash": segment.index_node_hash,
+ "document_id": segment.document_id,
+ "dataset_id": segment.dataset_id,
+ }
+ )
- # save vector index
- index_processor.load(dataset, documents, with_keywords=False)
+ documents.append(document)
+ # save vector index
+ index_processor.load(dataset, documents, with_keywords=False)
+ db.session.query(DatasetDocument).filter(DatasetDocument.id == dataset_document.id) \
+ .update({"indexing_status": "completed"}, synchronize_session=False)
+ db.session.commit()
+ except Exception as e:
+ db.session.query(DatasetDocument).filter(DatasetDocument.id == dataset_document.id) \
+ .update({"indexing_status": "error", "error": str(e)}, synchronize_session=False)
+ db.session.commit()
elif action == 'update':
- # clean index
- index_processor.clean(dataset, None, with_keywords=False)
dataset_documents = db.session.query(DatasetDocument).filter(
DatasetDocument.dataset_id == dataset_id,
DatasetDocument.indexing_status == 'completed',
@@ -75,28 +86,46 @@ def deal_dataset_vector_index_task(dataset_id: str, action: str):
).all()
# add new index
if dataset_documents:
- documents = []
+ # update document status
+ dataset_documents_ids = [doc.id for doc in dataset_documents]
+ db.session.query(DatasetDocument).filter(DatasetDocument.id.in_(dataset_documents_ids)) \
+ .update({"indexing_status": "indexing"}, synchronize_session=False)
+ db.session.commit()
+
+ # clean index
+ index_processor.clean(dataset, None, with_keywords=False)
+
for dataset_document in dataset_documents:
- # delete from vector index
- segments = db.session.query(DocumentSegment).filter(
- DocumentSegment.document_id == dataset_document.id,
- DocumentSegment.enabled == True
- ).order_by(DocumentSegment.position.asc()).all()
- for segment in segments:
- document = Document(
- page_content=segment.content,
- metadata={
- "doc_id": segment.index_node_id,
- "doc_hash": segment.index_node_hash,
- "document_id": segment.document_id,
- "dataset_id": segment.dataset_id,
- }
- )
+ # update from vector index
+ try:
+ segments = db.session.query(DocumentSegment).filter(
+ DocumentSegment.document_id == dataset_document.id,
+ DocumentSegment.enabled == True
+ ).order_by(DocumentSegment.position.asc()).all()
+ if segments:
+ documents = []
+ for segment in segments:
+ document = Document(
+ page_content=segment.content,
+ metadata={
+ "doc_id": segment.index_node_id,
+ "doc_hash": segment.index_node_hash,
+ "document_id": segment.document_id,
+ "dataset_id": segment.dataset_id,
+ }
+ )
- documents.append(document)
+ documents.append(document)
+ # save vector index
+ index_processor.load(dataset, documents, with_keywords=False)
+ db.session.query(DatasetDocument).filter(DatasetDocument.id == dataset_document.id) \
+ .update({"indexing_status": "completed"}, synchronize_session=False)
+ db.session.commit()
+ except Exception as e:
+ db.session.query(DatasetDocument).filter(DatasetDocument.id == dataset_document.id) \
+ .update({"indexing_status": "error", "error": str(e)}, synchronize_session=False)
+ db.session.commit()
- # save vector index
- index_processor.load(dataset, documents, with_keywords=False)
end_at = time.perf_counter()
logging.info(
diff --git a/api/tasks/remove_app_and_related_data_task.py b/api/tasks/remove_app_and_related_data_task.py
index 378756e68c8202..4efe7ee38c0b32 100644
--- a/api/tasks/remove_app_and_related_data_task.py
+++ b/api/tasks/remove_app_and_related_data_task.py
@@ -1,8 +1,10 @@
import logging
import time
+from collections.abc import Callable
import click
from celery import shared_task
+from sqlalchemy import delete
from sqlalchemy.exc import SQLAlchemyError
from extensions.ext_database import db
@@ -28,7 +30,7 @@
)
from models.tools import WorkflowToolProvider
from models.web import PinnedConversation, SavedMessage
-from models.workflow import Workflow, WorkflowAppLog, WorkflowNodeExecution, WorkflowRun
+from models.workflow import ConversationVariable, Workflow, WorkflowAppLog, WorkflowNodeExecution, WorkflowRun
@shared_task(queue='app_deletion', bind=True, max_retries=3)
@@ -54,6 +56,7 @@ def remove_app_and_related_data_task(self, tenant_id: str, app_id: str):
_delete_app_tag_bindings(tenant_id, app_id)
_delete_end_users(tenant_id, app_id)
_delete_trace_app_configs(tenant_id, app_id)
+ _delete_conversation_variables(app_id=app_id)
end_at = time.perf_counter()
logging.info(click.style(f'App and related data deleted: {app_id} latency: {end_at - start_at}', fg='green'))
@@ -225,6 +228,13 @@ def del_conversation(conversation_id: str):
"conversation"
)
+def _delete_conversation_variables(*, app_id: str):
+ stmt = delete(ConversationVariable).where(ConversationVariable.app_id == app_id)
+ with db.engine.connect() as conn:
+ conn.execute(stmt)
+ conn.commit()
+ logging.info(click.style(f"Deleted conversation variables for app {app_id}", fg='green'))
+
def _delete_app_messages(tenant_id: str, app_id: str):
def del_message(message_id: str):
@@ -299,7 +309,7 @@ def del_trace_app_config(trace_app_config_id: str):
)
-def _delete_records(query_sql: str, params: dict, delete_func: callable, name: str) -> None:
+def _delete_records(query_sql: str, params: dict, delete_func: Callable, name: str) -> None:
while True:
with db.engine.begin() as conn:
rs = conn.execute(db.text(query_sql), params)
diff --git a/api/tests/integration_tests/.env.example b/api/tests/integration_tests/.env.example
index f29e5ef4d61285..2d52399d29a995 100644
--- a/api/tests/integration_tests/.env.example
+++ b/api/tests/integration_tests/.env.example
@@ -79,4 +79,7 @@ CODE_EXECUTION_API_KEY=
VOLC_API_KEY=
VOLC_SECRET_KEY=
VOLC_MODEL_ENDPOINT_ID=
-VOLC_EMBEDDING_ENDPOINT_ID=
\ No newline at end of file
+VOLC_EMBEDDING_ENDPOINT_ID=
+
+# 360 AI Credentials
+ZHINAO_API_KEY=
diff --git a/api/tests/integration_tests/model_runtime/__mock/huggingface_tei.py b/api/tests/integration_tests/model_runtime/__mock/huggingface_tei.py
new file mode 100644
index 00000000000000..c2fe95974b10f1
--- /dev/null
+++ b/api/tests/integration_tests/model_runtime/__mock/huggingface_tei.py
@@ -0,0 +1,93 @@
+from core.model_runtime.model_providers.huggingface_tei.tei_helper import TeiModelExtraParameter
+
+
+class MockTEIClass:
+ @staticmethod
+ def get_tei_extra_parameter(server_url: str, model_name: str) -> TeiModelExtraParameter:
+ # During mock, we don't have a real server to query, so we just return a dummy value
+ if 'rerank' in model_name:
+ model_type = 'reranker'
+ else:
+ model_type = 'embedding'
+
+ return TeiModelExtraParameter(model_type=model_type, max_input_length=512, max_client_batch_size=1)
+
+ @staticmethod
+ def invoke_tokenize(server_url: str, texts: list[str]) -> list[list[dict]]:
+ # Use space as token separator, and split the text into tokens
+ tokenized_texts = []
+ for text in texts:
+ tokens = text.split(' ')
+ current_index = 0
+ tokenized_text = []
+ for idx, token in enumerate(tokens):
+ s_token = {
+ 'id': idx,
+ 'text': token,
+ 'special': False,
+ 'start': current_index,
+ 'stop': current_index + len(token),
+ }
+ current_index += len(token) + 1
+ tokenized_text.append(s_token)
+ tokenized_texts.append(tokenized_text)
+ return tokenized_texts
+
+ @staticmethod
+ def invoke_embeddings(server_url: str, texts: list[str]) -> dict:
+ # {
+ # "object": "list",
+ # "data": [
+ # {
+ # "object": "embedding",
+ # "embedding": [...],
+ # "index": 0
+ # }
+ # ],
+ # "model": "MODEL_NAME",
+ # "usage": {
+ # "prompt_tokens": 3,
+ # "total_tokens": 3
+ # }
+ # }
+ embeddings = []
+ for idx, text in enumerate(texts):
+ embedding = [0.1] * 768
+ embeddings.append(
+ {
+ 'object': 'embedding',
+ 'embedding': embedding,
+ 'index': idx,
+ }
+ )
+ return {
+ 'object': 'list',
+ 'data': embeddings,
+ 'model': 'MODEL_NAME',
+ 'usage': {
+ 'prompt_tokens': sum(len(text.split(' ')) for text in texts),
+ 'total_tokens': sum(len(text.split(' ')) for text in texts),
+ },
+ }
+
+ def invoke_rerank(server_url: str, query: str, texts: list[str]) -> list[dict]:
+ # Example response:
+ # [
+ # {
+ # "index": 0,
+ # "text": "Deep Learning is ...",
+ # "score": 0.9950755
+ # }
+ # ]
+ reranked_docs = []
+ for idx, text in enumerate(texts):
+ reranked_docs.append(
+ {
+ 'index': idx,
+ 'text': text,
+ 'score': 0.9,
+ }
+ )
+ # For mock, only return the first document
+ break
+ return reranked_docs
diff --git a/api/tests/integration_tests/model_runtime/huggingface_tei/__init__.py b/api/tests/integration_tests/model_runtime/huggingface_tei/__init__.py
new file mode 100644
index 00000000000000..e69de29bb2d1d6
diff --git a/api/tests/integration_tests/model_runtime/huggingface_tei/test_embeddings.py b/api/tests/integration_tests/model_runtime/huggingface_tei/test_embeddings.py
new file mode 100644
index 00000000000000..ed371fbc07aa8d
--- /dev/null
+++ b/api/tests/integration_tests/model_runtime/huggingface_tei/test_embeddings.py
@@ -0,0 +1,72 @@
+import os
+
+import pytest
+
+from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
+from core.model_runtime.errors.validate import CredentialsValidateFailedError
+from core.model_runtime.model_providers.huggingface_tei.text_embedding.text_embedding import (
+ HuggingfaceTeiTextEmbeddingModel,
+ TeiHelper,
+)
+from tests.integration_tests.model_runtime.__mock.huggingface_tei import MockTEIClass
+
+MOCK = os.getenv('MOCK_SWITCH', 'false').lower() == 'true'
+
+
+@pytest.fixture
+def setup_tei_mock(request, monkeypatch: pytest.MonkeyPatch):
+ if MOCK:
+ monkeypatch.setattr(TeiHelper, 'get_tei_extra_parameter', MockTEIClass.get_tei_extra_parameter)
+ monkeypatch.setattr(TeiHelper, 'invoke_tokenize', MockTEIClass.invoke_tokenize)
+ monkeypatch.setattr(TeiHelper, 'invoke_embeddings', MockTEIClass.invoke_embeddings)
+ monkeypatch.setattr(TeiHelper, 'invoke_rerank', MockTEIClass.invoke_rerank)
+ yield
+
+ if MOCK:
+ monkeypatch.undo()
+
+
+@pytest.mark.parametrize('setup_tei_mock', [['none']], indirect=True)
+def test_validate_credentials(setup_tei_mock):
+ model = HuggingfaceTeiTextEmbeddingModel()
+ # model name is only used in mock
+ model_name = 'embedding'
+
+ if MOCK:
+ # TEI Provider will check model type by API endpoint, at real server, the model type is correct.
+        # So we don't need to check model type here. Only check in mock
+ with pytest.raises(CredentialsValidateFailedError):
+ model.validate_credentials(
+ model='reranker',
+ credentials={
+ 'server_url': os.environ.get('TEI_EMBEDDING_SERVER_URL', ""),
+ }
+ )
+
+ model.validate_credentials(
+ model=model_name,
+ credentials={
+ 'server_url': os.environ.get('TEI_EMBEDDING_SERVER_URL', ""),
+ }
+ )
+
+@pytest.mark.parametrize('setup_tei_mock', [['none']], indirect=True)
+def test_invoke_model(setup_tei_mock):
+ model = HuggingfaceTeiTextEmbeddingModel()
+ model_name = 'embedding'
+
+ result = model.invoke(
+ model=model_name,
+ credentials={
+ 'server_url': os.environ.get('TEI_EMBEDDING_SERVER_URL', ""),
+ },
+ texts=[
+ "hello",
+ "world"
+ ],
+ user="abc-123"
+ )
+
+ assert isinstance(result, TextEmbeddingResult)
+ assert len(result.embeddings) == 2
+ assert result.usage.total_tokens > 0
diff --git a/api/tests/integration_tests/model_runtime/huggingface_tei/test_rerank.py b/api/tests/integration_tests/model_runtime/huggingface_tei/test_rerank.py
new file mode 100644
index 00000000000000..57e229e6be94e9
--- /dev/null
+++ b/api/tests/integration_tests/model_runtime/huggingface_tei/test_rerank.py
@@ -0,0 +1,76 @@
+import os
+
+import pytest
+
+from core.model_runtime.entities.rerank_entities import RerankDocument, RerankResult
+from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
+from core.model_runtime.errors.validate import CredentialsValidateFailedError
+from core.model_runtime.model_providers.huggingface_tei.rerank.rerank import (
+ HuggingfaceTeiRerankModel,
+)
+from core.model_runtime.model_providers.huggingface_tei.text_embedding.text_embedding import TeiHelper
+from tests.integration_tests.model_runtime.__mock.huggingface_tei import MockTEIClass
+
+MOCK = os.getenv('MOCK_SWITCH', 'false').lower() == 'true'
+
+
+@pytest.fixture
+def setup_tei_mock(request, monkeypatch: pytest.MonkeyPatch):
+ if MOCK:
+ monkeypatch.setattr(TeiHelper, 'get_tei_extra_parameter', MockTEIClass.get_tei_extra_parameter)
+ monkeypatch.setattr(TeiHelper, 'invoke_tokenize', MockTEIClass.invoke_tokenize)
+ monkeypatch.setattr(TeiHelper, 'invoke_embeddings', MockTEIClass.invoke_embeddings)
+ monkeypatch.setattr(TeiHelper, 'invoke_rerank', MockTEIClass.invoke_rerank)
+ yield
+
+ if MOCK:
+ monkeypatch.undo()
+
+@pytest.mark.parametrize('setup_tei_mock', [['none']], indirect=True)
+def test_validate_credentials(setup_tei_mock):
+ model = HuggingfaceTeiRerankModel()
+ # model name is only used in mock
+ model_name = 'reranker'
+
+ if MOCK:
+        # The TEI provider infers the model type from the API endpoint; against a real server the type is always correct,
+        # so we don't need to check the model type here — only in the mock.
+ with pytest.raises(CredentialsValidateFailedError):
+ model.validate_credentials(
+ model='embedding',
+ credentials={
+ 'server_url': os.environ.get('TEI_RERANK_SERVER_URL'),
+ }
+ )
+
+ model.validate_credentials(
+ model=model_name,
+ credentials={
+ 'server_url': os.environ.get('TEI_RERANK_SERVER_URL'),
+ }
+ )
+
+@pytest.mark.parametrize('setup_tei_mock', [['none']], indirect=True)
+def test_invoke_model(setup_tei_mock):
+ model = HuggingfaceTeiRerankModel()
+ # model name is only used in mock
+ model_name = 'reranker'
+
+ result = model.invoke(
+ model=model_name,
+ credentials={
+ 'server_url': os.environ.get('TEI_RERANK_SERVER_URL'),
+ },
+ query="Who is Kasumi?",
+ docs=[
+ "Kasumi is a girl's name of Japanese origin meaning \"mist\".",
+ "Her music is a kawaii bass, a mix of future bass, pop, and kawaii music ",
+ "and she leads a team named PopiParty."
+ ],
+ score_threshold=0.8
+ )
+
+ assert isinstance(result, RerankResult)
+ assert len(result.docs) == 1
+ assert result.docs[0].index == 0
+ assert result.docs[0].score >= 0.8
diff --git a/api/tests/integration_tests/model_runtime/openai_api_compatible/test_speech2text.py b/api/tests/integration_tests/model_runtime/openai_api_compatible/test_speech2text.py
new file mode 100644
index 00000000000000..61079104dcad73
--- /dev/null
+++ b/api/tests/integration_tests/model_runtime/openai_api_compatible/test_speech2text.py
@@ -0,0 +1,59 @@
+import os
+
+import pytest
+
+from core.model_runtime.errors.validate import CredentialsValidateFailedError
+from core.model_runtime.model_providers.openai_api_compatible.speech2text.speech2text import (
+ OAICompatSpeech2TextModel,
+)
+
+
+def test_validate_credentials():
+ model = OAICompatSpeech2TextModel()
+
+ with pytest.raises(CredentialsValidateFailedError):
+ model.validate_credentials(
+ model="whisper-1",
+ credentials={
+ "api_key": "invalid_key",
+ "endpoint_url": "https://api.openai.com/v1/"
+ },
+ )
+
+ model.validate_credentials(
+ model="whisper-1",
+ credentials={
+ "api_key": os.environ.get("OPENAI_API_KEY"),
+ "endpoint_url": "https://api.openai.com/v1/"
+ },
+ )
+
+
+def test_invoke_model():
+ model = OAICompatSpeech2TextModel()
+
+ # Get the directory of the current file
+ current_dir = os.path.dirname(os.path.abspath(__file__))
+
+ # Get assets directory
+ assets_dir = os.path.join(os.path.dirname(current_dir), "assets")
+
+ # Construct the path to the audio file
+ audio_file_path = os.path.join(assets_dir, "audio.mp3")
+
+ # Open the file and get the file object
+    with open(audio_file_path, "rb") as audio_file:
+        # Invoke while the handle is still open; once the 'with' exits the file is closed.
+        file = audio_file
+        result = model.invoke(
+            model="whisper-1",
+            credentials={
+                "api_key": os.environ.get("OPENAI_API_KEY"),
+                "endpoint_url": "https://api.openai.com/v1/"
+            },
+            file=file,
+            user="abc-123",
+        )
+
+    assert isinstance(result, str)
+    assert result == '1, 2, 3, 4, 5, 6, 7, 8, 9, 10'
diff --git a/api/tests/integration_tests/model_runtime/siliconflow/test_speech2text.py b/api/tests/integration_tests/model_runtime/siliconflow/test_speech2text.py
new file mode 100644
index 00000000000000..82b7921c8506f0
--- /dev/null
+++ b/api/tests/integration_tests/model_runtime/siliconflow/test_speech2text.py
@@ -0,0 +1,53 @@
+import os
+
+import pytest
+
+from core.model_runtime.errors.validate import CredentialsValidateFailedError
+from core.model_runtime.model_providers.siliconflow.speech2text.speech2text import SiliconflowSpeech2TextModel
+
+
+def test_validate_credentials():
+ model = SiliconflowSpeech2TextModel()
+
+ with pytest.raises(CredentialsValidateFailedError):
+ model.validate_credentials(
+ model="iic/SenseVoiceSmall",
+ credentials={
+ "api_key": "invalid_key"
+ },
+ )
+
+ model.validate_credentials(
+ model="iic/SenseVoiceSmall",
+ credentials={
+ "api_key": os.environ.get("API_KEY")
+ },
+ )
+
+
+def test_invoke_model():
+ model = SiliconflowSpeech2TextModel()
+
+ # Get the directory of the current file
+ current_dir = os.path.dirname(os.path.abspath(__file__))
+
+ # Get assets directory
+ assets_dir = os.path.join(os.path.dirname(current_dir), "assets")
+
+ # Construct the path to the audio file
+ audio_file_path = os.path.join(assets_dir, "audio.mp3")
+
+ # Open the file and get the file object
+    with open(audio_file_path, "rb") as audio_file:
+        # Invoke while the handle is still open; once the 'with' exits the file is closed.
+        file = audio_file
+        result = model.invoke(
+            model="iic/SenseVoiceSmall",
+            credentials={
+                "api_key": os.environ.get("API_KEY")
+            },
+            file=file
+        )
+
+    assert isinstance(result, str)
+    assert result == '1,2,3,4,5,6,7,8,9,10.'
diff --git a/api/tests/integration_tests/model_runtime/siliconflow/test_text_embedding.py b/api/tests/integration_tests/model_runtime/siliconflow/test_text_embedding.py
new file mode 100644
index 00000000000000..18bd2e893ae10a
--- /dev/null
+++ b/api/tests/integration_tests/model_runtime/siliconflow/test_text_embedding.py
@@ -0,0 +1,62 @@
+import os
+
+import pytest
+
+from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
+from core.model_runtime.errors.validate import CredentialsValidateFailedError
+from core.model_runtime.model_providers.siliconflow.text_embedding.text_embedding import (
+ SiliconflowTextEmbeddingModel,
+)
+
+
+def test_validate_credentials():
+ model = SiliconflowTextEmbeddingModel()
+
+ with pytest.raises(CredentialsValidateFailedError):
+ model.validate_credentials(
+ model="BAAI/bge-large-zh-v1.5",
+ credentials={
+ "api_key": "invalid_key"
+ },
+ )
+
+ model.validate_credentials(
+ model="BAAI/bge-large-zh-v1.5",
+ credentials={
+ "api_key": os.environ.get("API_KEY"),
+ },
+ )
+
+
+def test_invoke_model():
+ model = SiliconflowTextEmbeddingModel()
+
+ result = model.invoke(
+ model="BAAI/bge-large-zh-v1.5",
+ credentials={
+ "api_key": os.environ.get("API_KEY"),
+ },
+ texts=[
+ "hello",
+ "world",
+ ],
+ user="abc-123",
+ )
+
+ assert isinstance(result, TextEmbeddingResult)
+ assert len(result.embeddings) == 2
+ assert result.usage.total_tokens == 6
+
+
+def test_get_num_tokens():
+ model = SiliconflowTextEmbeddingModel()
+
+ num_tokens = model.get_num_tokens(
+ model="BAAI/bge-large-zh-v1.5",
+ credentials={
+ "api_key": os.environ.get("API_KEY"),
+ },
+ texts=["hello", "world"],
+ )
+
+ assert num_tokens == 2
diff --git a/api/tests/integration_tests/model_runtime/zhinao/__init__.py b/api/tests/integration_tests/model_runtime/zhinao/__init__.py
new file mode 100644
index 00000000000000..e69de29bb2d1d6
diff --git a/api/tests/integration_tests/model_runtime/zhinao/test_llm.py b/api/tests/integration_tests/model_runtime/zhinao/test_llm.py
new file mode 100644
index 00000000000000..47a5b6cae23587
--- /dev/null
+++ b/api/tests/integration_tests/model_runtime/zhinao/test_llm.py
@@ -0,0 +1,106 @@
+import os
+from collections.abc import Generator
+
+import pytest
+
+from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
+from core.model_runtime.entities.message_entities import AssistantPromptMessage, SystemPromptMessage, UserPromptMessage
+from core.model_runtime.errors.validate import CredentialsValidateFailedError
+from core.model_runtime.model_providers.zhinao.llm.llm import ZhinaoLargeLanguageModel
+
+
+def test_validate_credentials():
+ model = ZhinaoLargeLanguageModel()
+
+ with pytest.raises(CredentialsValidateFailedError):
+ model.validate_credentials(
+ model='360gpt2-pro',
+ credentials={
+ 'api_key': 'invalid_key'
+ }
+ )
+
+ model.validate_credentials(
+ model='360gpt2-pro',
+ credentials={
+ 'api_key': os.environ.get('ZHINAO_API_KEY')
+ }
+ )
+
+
+def test_invoke_model():
+ model = ZhinaoLargeLanguageModel()
+
+ response = model.invoke(
+ model='360gpt2-pro',
+ credentials={
+ 'api_key': os.environ.get('ZHINAO_API_KEY')
+ },
+ prompt_messages=[
+ UserPromptMessage(
+ content='Who are you?'
+ )
+ ],
+ model_parameters={
+ 'temperature': 0.5,
+ 'max_tokens': 10
+ },
+ stop=['How'],
+ stream=False,
+ user="abc-123"
+ )
+
+ assert isinstance(response, LLMResult)
+ assert len(response.message.content) > 0
+
+
+def test_invoke_stream_model():
+ model = ZhinaoLargeLanguageModel()
+
+ response = model.invoke(
+ model='360gpt2-pro',
+ credentials={
+ 'api_key': os.environ.get('ZHINAO_API_KEY')
+ },
+ prompt_messages=[
+ UserPromptMessage(
+ content='Hello World!'
+ )
+ ],
+ model_parameters={
+ 'temperature': 0.5,
+ 'max_tokens': 100,
+ 'seed': 1234
+ },
+ stream=True,
+ user="abc-123"
+ )
+
+ assert isinstance(response, Generator)
+
+ for chunk in response:
+ assert isinstance(chunk, LLMResultChunk)
+ assert isinstance(chunk.delta, LLMResultChunkDelta)
+ assert isinstance(chunk.delta.message, AssistantPromptMessage)
+ assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True
+
+
+def test_get_num_tokens():
+ model = ZhinaoLargeLanguageModel()
+
+ num_tokens = model.get_num_tokens(
+ model='360gpt2-pro',
+ credentials={
+ 'api_key': os.environ.get('ZHINAO_API_KEY')
+ },
+ prompt_messages=[
+ SystemPromptMessage(
+ content='You are a helpful AI assistant.',
+ ),
+ UserPromptMessage(
+ content='Hello World!'
+ )
+ ]
+ )
+
+ assert num_tokens == 21
diff --git a/api/tests/integration_tests/model_runtime/zhinao/test_provider.py b/api/tests/integration_tests/model_runtime/zhinao/test_provider.py
new file mode 100644
index 00000000000000..87b0e6c2d9b5de
--- /dev/null
+++ b/api/tests/integration_tests/model_runtime/zhinao/test_provider.py
@@ -0,0 +1,21 @@
+import os
+
+import pytest
+
+from core.model_runtime.errors.validate import CredentialsValidateFailedError
+from core.model_runtime.model_providers.zhinao.zhinao import ZhinaoProvider
+
+
+def test_validate_provider_credentials():
+ provider = ZhinaoProvider()
+
+ with pytest.raises(CredentialsValidateFailedError):
+ provider.validate_provider_credentials(
+ credentials={}
+ )
+
+ provider.validate_provider_credentials(
+ credentials={
+ 'api_key': os.environ.get('ZHINAO_API_KEY')
+ }
+ )
diff --git a/api/tests/integration_tests/vdb/elasticsearch/__init__.py b/api/tests/integration_tests/vdb/elasticsearch/__init__.py
new file mode 100644
index 00000000000000..e69de29bb2d1d6
diff --git a/api/tests/integration_tests/vdb/elasticsearch/test_elasticsearch.py b/api/tests/integration_tests/vdb/elasticsearch/test_elasticsearch.py
new file mode 100644
index 00000000000000..b1c1cc10d9375d
--- /dev/null
+++ b/api/tests/integration_tests/vdb/elasticsearch/test_elasticsearch.py
@@ -0,0 +1,25 @@
+from core.rag.datasource.vdb.elasticsearch.elasticsearch_vector import ElasticSearchConfig, ElasticSearchVector
+from tests.integration_tests.vdb.test_vector_store import (
+ AbstractVectorTest,
+ setup_mock_redis,
+)
+
+
+class ElasticSearchVectorTest(AbstractVectorTest):
+ def __init__(self):
+ super().__init__()
+ self.attributes = ['doc_id', 'dataset_id', 'document_id', 'doc_hash']
+ self.vector = ElasticSearchVector(
+ index_name=self.collection_name.lower(),
+ config=ElasticSearchConfig(
+ host='http://localhost',
+ port='9200',
+ username='elastic',
+ password='elastic'
+ ),
+ attributes=self.attributes
+ )
+
+
+def test_elasticsearch_vector(setup_mock_redis):
+ ElasticSearchVectorTest().run_all_tests()
diff --git a/api/tests/integration_tests/workflow/nodes/test_llm.py b/api/tests/integration_tests/workflow/nodes/test_llm.py
index ac704e4eaf54df..4686ce06752ed5 100644
--- a/api/tests/integration_tests/workflow/nodes/test_llm.py
+++ b/api/tests/integration_tests/workflow/nodes/test_llm.py
@@ -10,8 +10,8 @@
from core.model_manager import ModelInstance
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.model_providers import ModelProviderFactory
-from core.workflow.entities.node_entities import SystemVariable
from core.workflow.entities.variable_pool import VariablePool
+from core.workflow.enums import SystemVariable
from core.workflow.nodes.base_node import UserFrom
from core.workflow.nodes.llm.llm_node import LLMNode
from extensions.ext_database import db
@@ -236,4 +236,4 @@ def test_execute_llm_with_jinja2(setup_code_executor_mock, setup_openai_mock):
assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
assert 'sunny' in json.dumps(result.process_data)
- assert 'what\'s the weather today?' in json.dumps(result.process_data)
\ No newline at end of file
+ assert 'what\'s the weather today?' in json.dumps(result.process_data)
diff --git a/api/tests/integration_tests/workflow/nodes/test_parameter_extractor.py b/api/tests/integration_tests/workflow/nodes/test_parameter_extractor.py
index 312ad47026beb5..adf5ffe3cadf77 100644
--- a/api/tests/integration_tests/workflow/nodes/test_parameter_extractor.py
+++ b/api/tests/integration_tests/workflow/nodes/test_parameter_extractor.py
@@ -12,8 +12,8 @@
from core.model_manager import ModelInstance
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.model_providers.model_provider_factory import ModelProviderFactory
-from core.workflow.entities.node_entities import SystemVariable
from core.workflow.entities.variable_pool import VariablePool
+from core.workflow.enums import SystemVariable
from core.workflow.nodes.base_node import UserFrom
from core.workflow.nodes.parameter_extractor.parameter_extractor_node import ParameterExtractorNode
from extensions.ext_database import db
@@ -363,7 +363,7 @@ def test_extract_json_response():
{
"location": "kawaii"
}
- hello world.
+ hello world.
""")
assert result['location'] == 'kawaii'
@@ -445,4 +445,4 @@ def test_chat_parameter_extractor_with_memory(setup_anthropic_mock):
assert latest_role != prompt.get('role')
if prompt.get('role') in ['user', 'assistant']:
- latest_role = prompt.get('role')
\ No newline at end of file
+ latest_role = prompt.get('role')
diff --git a/api/tests/unit_tests/core/app/segments/test_factory.py b/api/tests/unit_tests/core/app/segments/test_factory.py
index 85321ee3741dc2..afd0fa50b590f8 100644
--- a/api/tests/unit_tests/core/app/segments/test_factory.py
+++ b/api/tests/unit_tests/core/app/segments/test_factory.py
@@ -3,19 +3,17 @@
import pytest
from core.app.segments import (
- ArrayFileVariable,
ArrayNumberVariable,
ArrayObjectVariable,
ArrayStringVariable,
- FileVariable,
FloatVariable,
IntegerVariable,
- NoneSegment,
ObjectSegment,
SecretVariable,
StringVariable,
factory,
)
+from core.app.segments.exc import VariableError
def test_string_variable():
@@ -44,7 +42,7 @@ def test_secret_variable():
def test_invalid_value_type():
test_data = {'value_type': 'unknown', 'name': 'test_invalid', 'value': 'value'}
- with pytest.raises(ValueError):
+ with pytest.raises(VariableError):
factory.build_variable_from_mapping(test_data)
@@ -67,7 +65,7 @@ def test_build_a_object_variable_with_none_value():
}
)
assert isinstance(var, ObjectSegment)
- assert isinstance(var.value['key1'], NoneSegment)
+ assert var.value['key1'] is None
def test_object_variable():
@@ -77,26 +75,14 @@ def test_object_variable():
'name': 'test_object',
'description': 'Description of the variable.',
'value': {
- 'key1': {
- 'id': str(uuid4()),
- 'value_type': 'string',
- 'name': 'text',
- 'value': 'text',
- 'description': 'Description of the variable.',
- },
- 'key2': {
- 'id': str(uuid4()),
- 'value_type': 'number',
- 'name': 'number',
- 'value': 1,
- 'description': 'Description of the variable.',
- },
+ 'key1': 'text',
+ 'key2': 2,
},
}
variable = factory.build_variable_from_mapping(mapping)
assert isinstance(variable, ObjectSegment)
- assert isinstance(variable.value['key1'], StringVariable)
- assert isinstance(variable.value['key2'], IntegerVariable)
+ assert isinstance(variable.value['key1'], str)
+ assert isinstance(variable.value['key2'], int)
def test_array_string_variable():
@@ -106,26 +92,14 @@ def test_array_string_variable():
'name': 'test_array',
'description': 'Description of the variable.',
'value': [
- {
- 'id': str(uuid4()),
- 'value_type': 'string',
- 'name': 'text',
- 'value': 'text',
- 'description': 'Description of the variable.',
- },
- {
- 'id': str(uuid4()),
- 'value_type': 'string',
- 'name': 'text',
- 'value': 'text',
- 'description': 'Description of the variable.',
- },
+ 'text',
+ 'text',
],
}
variable = factory.build_variable_from_mapping(mapping)
assert isinstance(variable, ArrayStringVariable)
- assert isinstance(variable.value[0], StringVariable)
- assert isinstance(variable.value[1], StringVariable)
+ assert isinstance(variable.value[0], str)
+ assert isinstance(variable.value[1], str)
def test_array_number_variable():
@@ -135,26 +109,14 @@ def test_array_number_variable():
'name': 'test_array',
'description': 'Description of the variable.',
'value': [
- {
- 'id': str(uuid4()),
- 'value_type': 'number',
- 'name': 'number',
- 'value': 1,
- 'description': 'Description of the variable.',
- },
- {
- 'id': str(uuid4()),
- 'value_type': 'number',
- 'name': 'number',
- 'value': 2.0,
- 'description': 'Description of the variable.',
- },
+ 1,
+ 2.0,
],
}
variable = factory.build_variable_from_mapping(mapping)
assert isinstance(variable, ArrayNumberVariable)
- assert isinstance(variable.value[0], IntegerVariable)
- assert isinstance(variable.value[1], FloatVariable)
+ assert isinstance(variable.value[0], int)
+ assert isinstance(variable.value[1], float)
def test_array_object_variable():
@@ -165,143 +127,32 @@ def test_array_object_variable():
'description': 'Description of the variable.',
'value': [
{
- 'id': str(uuid4()),
- 'value_type': 'object',
- 'name': 'object',
- 'description': 'Description of the variable.',
- 'value': {
- 'key1': {
- 'id': str(uuid4()),
- 'value_type': 'string',
- 'name': 'text',
- 'value': 'text',
- 'description': 'Description of the variable.',
- },
- 'key2': {
- 'id': str(uuid4()),
- 'value_type': 'number',
- 'name': 'number',
- 'value': 1,
- 'description': 'Description of the variable.',
- },
- },
+ 'key1': 'text',
+ 'key2': 1,
},
{
- 'id': str(uuid4()),
- 'value_type': 'object',
- 'name': 'object',
- 'description': 'Description of the variable.',
- 'value': {
- 'key1': {
- 'id': str(uuid4()),
- 'value_type': 'string',
- 'name': 'text',
- 'value': 'text',
- 'description': 'Description of the variable.',
- },
- 'key2': {
- 'id': str(uuid4()),
- 'value_type': 'number',
- 'name': 'number',
- 'value': 1,
- 'description': 'Description of the variable.',
- },
- },
+ 'key1': 'text',
+ 'key2': 1,
},
],
}
variable = factory.build_variable_from_mapping(mapping)
assert isinstance(variable, ArrayObjectVariable)
- assert isinstance(variable.value[0], ObjectSegment)
- assert isinstance(variable.value[1], ObjectSegment)
- assert isinstance(variable.value[0].value['key1'], StringVariable)
- assert isinstance(variable.value[0].value['key2'], IntegerVariable)
- assert isinstance(variable.value[1].value['key1'], StringVariable)
- assert isinstance(variable.value[1].value['key2'], IntegerVariable)
+ assert isinstance(variable.value[0], dict)
+ assert isinstance(variable.value[1], dict)
+ assert isinstance(variable.value[0]['key1'], str)
+ assert isinstance(variable.value[0]['key2'], int)
+ assert isinstance(variable.value[1]['key1'], str)
+ assert isinstance(variable.value[1]['key2'], int)
-def test_file_variable():
- mapping = {
- 'id': str(uuid4()),
- 'value_type': 'file',
- 'name': 'test_file',
- 'description': 'Description of the variable.',
- 'value': {
- 'id': str(uuid4()),
- 'tenant_id': 'tenant_id',
- 'type': 'image',
- 'transfer_method': 'local_file',
- 'url': 'url',
- 'related_id': 'related_id',
- 'extra_config': {
- 'image_config': {
- 'width': 100,
- 'height': 100,
- },
- },
- 'filename': 'filename',
- 'extension': 'extension',
- 'mime_type': 'mime_type',
- },
- }
- variable = factory.build_variable_from_mapping(mapping)
- assert isinstance(variable, FileVariable)
-
-
-def test_array_file_variable():
- mapping = {
- 'id': str(uuid4()),
- 'value_type': 'array[file]',
- 'name': 'test_array_file',
- 'description': 'Description of the variable.',
- 'value': [
- {
- 'id': str(uuid4()),
- 'name': 'file',
- 'value_type': 'file',
- 'value': {
- 'id': str(uuid4()),
- 'tenant_id': 'tenant_id',
- 'type': 'image',
- 'transfer_method': 'local_file',
- 'url': 'url',
- 'related_id': 'related_id',
- 'extra_config': {
- 'image_config': {
- 'width': 100,
- 'height': 100,
- },
- },
- 'filename': 'filename',
- 'extension': 'extension',
- 'mime_type': 'mime_type',
- },
- },
+def test_variable_cannot_large_than_5_kb():
+ with pytest.raises(VariableError):
+ factory.build_variable_from_mapping(
{
'id': str(uuid4()),
- 'name': 'file',
- 'value_type': 'file',
- 'value': {
- 'id': str(uuid4()),
- 'tenant_id': 'tenant_id',
- 'type': 'image',
- 'transfer_method': 'local_file',
- 'url': 'url',
- 'related_id': 'related_id',
- 'extra_config': {
- 'image_config': {
- 'width': 100,
- 'height': 100,
- },
- },
- 'filename': 'filename',
- 'extension': 'extension',
- 'mime_type': 'mime_type',
- },
- },
- ],
- }
- variable = factory.build_variable_from_mapping(mapping)
- assert isinstance(variable, ArrayFileVariable)
- assert isinstance(variable.value[0], FileVariable)
- assert isinstance(variable.value[1], FileVariable)
+ 'value_type': 'string',
+ 'name': 'test_text',
+ 'value': 'a' * 1024 * 6,
+ }
+ )
diff --git a/api/tests/unit_tests/core/app/segments/test_segment.py b/api/tests/unit_tests/core/app/segments/test_segment.py
index 414404b7d0362a..7e3e69ffbfc45d 100644
--- a/api/tests/unit_tests/core/app/segments/test_segment.py
+++ b/api/tests/unit_tests/core/app/segments/test_segment.py
@@ -1,7 +1,7 @@
from core.app.segments import SecretVariable, StringSegment, parser
from core.helper import encrypter
-from core.workflow.entities.node_entities import SystemVariable
from core.workflow.entities.variable_pool import VariablePool
+from core.workflow.enums import SystemVariable
def test_segment_group_to_text():
diff --git a/api/tests/unit_tests/core/app/segments/test_variables.py b/api/tests/unit_tests/core/app/segments/test_variables.py
index e3f513971a4810..1f45c15f8712a9 100644
--- a/api/tests/unit_tests/core/app/segments/test_variables.py
+++ b/api/tests/unit_tests/core/app/segments/test_variables.py
@@ -54,20 +54,10 @@ def test_object_variable_to_object():
var = ObjectVariable(
name='object',
value={
- 'key1': ObjectVariable(
- name='object',
- value={
- 'key2': StringVariable(name='key2', value='value2'),
- },
- ),
- 'key2': ArrayAnyVariable(
- name='array',
- value=[
- StringVariable(name='key5_1', value='value5_1'),
- IntegerVariable(name='key5_2', value=42),
- ObjectVariable(name='key5_3', value={}),
- ],
- ),
+ 'key1': {
+ 'key2': 'value2',
+ },
+ 'key2': ['value5_1', 42, {}],
},
)
diff --git a/api/tests/unit_tests/core/prompt/test_advanced_prompt_transform.py b/api/tests/unit_tests/core/prompt/test_advanced_prompt_transform.py
index fd284488b548fe..d24cd4aae98ded 100644
--- a/api/tests/unit_tests/core/prompt/test_advanced_prompt_transform.py
+++ b/api/tests/unit_tests/core/prompt/test_advanced_prompt_transform.py
@@ -2,8 +2,8 @@
import pytest
-from core.app.app_config.entities import FileExtraConfig, ModelConfigEntity
-from core.file.file_obj import FileTransferMethod, FileType, FileVar
+from core.app.app_config.entities import ModelConfigEntity
+from core.file.file_obj import FileExtraConfig, FileTransferMethod, FileType, FileVar
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_runtime.entities.message_entities import AssistantPromptMessage, PromptMessageRole, UserPromptMessage
from core.prompt.advanced_prompt_transform import AdvancedPromptTransform
diff --git a/api/tests/unit_tests/core/workflow/nodes/test_answer.py b/api/tests/unit_tests/core/workflow/nodes/test_answer.py
index 3a32829e373c28..4617b6a42f8ec2 100644
--- a/api/tests/unit_tests/core/workflow/nodes/test_answer.py
+++ b/api/tests/unit_tests/core/workflow/nodes/test_answer.py
@@ -1,8 +1,8 @@
from unittest.mock import MagicMock
from core.app.entities.app_invoke_entities import InvokeFrom
-from core.workflow.entities.node_entities import SystemVariable
from core.workflow.entities.variable_pool import VariablePool
+from core.workflow.enums import SystemVariable
from core.workflow.nodes.answer.answer_node import AnswerNode
from core.workflow.nodes.base_node import UserFrom
from extensions.ext_database import db
diff --git a/api/tests/unit_tests/core/workflow/nodes/test_if_else.py b/api/tests/unit_tests/core/workflow/nodes/test_if_else.py
index 4662c5ff2b26d8..d21b7785c4f4a4 100644
--- a/api/tests/unit_tests/core/workflow/nodes/test_if_else.py
+++ b/api/tests/unit_tests/core/workflow/nodes/test_if_else.py
@@ -1,8 +1,8 @@
from unittest.mock import MagicMock
from core.app.entities.app_invoke_entities import InvokeFrom
-from core.workflow.entities.node_entities import SystemVariable
from core.workflow.entities.variable_pool import VariablePool
+from core.workflow.enums import SystemVariable
from core.workflow.nodes.base_node import UserFrom
from core.workflow.nodes.if_else.if_else_node import IfElseNode
from extensions.ext_database import db
diff --git a/api/tests/unit_tests/core/workflow/nodes/test_variable_assigner.py b/api/tests/unit_tests/core/workflow/nodes/test_variable_assigner.py
new file mode 100644
index 00000000000000..0b37d06fc069bc
--- /dev/null
+++ b/api/tests/unit_tests/core/workflow/nodes/test_variable_assigner.py
@@ -0,0 +1,150 @@
+from unittest import mock
+from uuid import uuid4
+
+from core.app.entities.app_invoke_entities import InvokeFrom
+from core.app.segments import ArrayStringVariable, StringVariable
+from core.workflow.entities.variable_pool import VariablePool
+from core.workflow.enums import SystemVariable
+from core.workflow.nodes.base_node import UserFrom
+from core.workflow.nodes.variable_assigner import VariableAssignerNode, WriteMode
+
+DEFAULT_NODE_ID = 'node_id'
+
+
+def test_overwrite_string_variable():
+ conversation_variable = StringVariable(
+ id=str(uuid4()),
+ name='test_conversation_variable',
+ value='the first value',
+ )
+
+ input_variable = StringVariable(
+ id=str(uuid4()),
+ name='test_string_variable',
+ value='the second value',
+ )
+
+ node = VariableAssignerNode(
+ tenant_id='tenant_id',
+ app_id='app_id',
+ workflow_id='workflow_id',
+ user_id='user_id',
+ user_from=UserFrom.ACCOUNT,
+ invoke_from=InvokeFrom.DEBUGGER,
+ config={
+ 'id': 'node_id',
+ 'data': {
+ 'assigned_variable_selector': ['conversation', conversation_variable.name],
+ 'write_mode': WriteMode.OVER_WRITE.value,
+ 'input_variable_selector': [DEFAULT_NODE_ID, input_variable.name],
+ },
+ },
+ )
+
+ variable_pool = VariablePool(
+ system_variables={SystemVariable.CONVERSATION_ID: 'conversation_id'},
+ user_inputs={},
+ environment_variables=[],
+ conversation_variables=[conversation_variable],
+ )
+ variable_pool.add(
+ [DEFAULT_NODE_ID, input_variable.name],
+ input_variable,
+ )
+
+ with mock.patch('core.workflow.nodes.variable_assigner.update_conversation_variable') as mock_run:
+ node.run(variable_pool)
+ mock_run.assert_called_once()
+
+ got = variable_pool.get(['conversation', conversation_variable.name])
+ assert got is not None
+ assert got.value == 'the second value'
+ assert got.to_object() == 'the second value'
+
+
+def test_append_variable_to_array():
+ conversation_variable = ArrayStringVariable(
+ id=str(uuid4()),
+ name='test_conversation_variable',
+ value=['the first value'],
+ )
+
+ input_variable = StringVariable(
+ id=str(uuid4()),
+ name='test_string_variable',
+ value='the second value',
+ )
+
+ node = VariableAssignerNode(
+ tenant_id='tenant_id',
+ app_id='app_id',
+ workflow_id='workflow_id',
+ user_id='user_id',
+ user_from=UserFrom.ACCOUNT,
+ invoke_from=InvokeFrom.DEBUGGER,
+ config={
+ 'id': 'node_id',
+ 'data': {
+ 'assigned_variable_selector': ['conversation', conversation_variable.name],
+ 'write_mode': WriteMode.APPEND.value,
+ 'input_variable_selector': [DEFAULT_NODE_ID, input_variable.name],
+ },
+ },
+ )
+
+ variable_pool = VariablePool(
+ system_variables={SystemVariable.CONVERSATION_ID: 'conversation_id'},
+ user_inputs={},
+ environment_variables=[],
+ conversation_variables=[conversation_variable],
+ )
+ variable_pool.add(
+ [DEFAULT_NODE_ID, input_variable.name],
+ input_variable,
+ )
+
+ with mock.patch('core.workflow.nodes.variable_assigner.update_conversation_variable') as mock_run:
+ node.run(variable_pool)
+ mock_run.assert_called_once()
+
+ got = variable_pool.get(['conversation', conversation_variable.name])
+ assert got is not None
+ assert got.to_object() == ['the first value', 'the second value']
+
+
+def test_clear_array():
+ conversation_variable = ArrayStringVariable(
+ id=str(uuid4()),
+ name='test_conversation_variable',
+ value=['the first value'],
+ )
+
+ node = VariableAssignerNode(
+ tenant_id='tenant_id',
+ app_id='app_id',
+ workflow_id='workflow_id',
+ user_id='user_id',
+ user_from=UserFrom.ACCOUNT,
+ invoke_from=InvokeFrom.DEBUGGER,
+ config={
+ 'id': 'node_id',
+ 'data': {
+ 'assigned_variable_selector': ['conversation', conversation_variable.name],
+ 'write_mode': WriteMode.CLEAR.value,
+ 'input_variable_selector': [],
+ },
+ },
+ )
+
+ variable_pool = VariablePool(
+ system_variables={SystemVariable.CONVERSATION_ID: 'conversation_id'},
+ user_inputs={},
+ environment_variables=[],
+ conversation_variables=[conversation_variable],
+ )
+
+ node.run(variable_pool)
+
+ got = variable_pool.get(['conversation', conversation_variable.name])
+ assert got is not None
+ assert got.to_object() == []
diff --git a/api/tests/unit_tests/models/test_conversation_variable.py b/api/tests/unit_tests/models/test_conversation_variable.py
new file mode 100644
index 00000000000000..9e16010d7ef5a4
--- /dev/null
+++ b/api/tests/unit_tests/models/test_conversation_variable.py
@@ -0,0 +1,25 @@
+from uuid import uuid4
+
+from core.app.segments import SegmentType, factory
+from models import ConversationVariable
+
+
+def test_from_variable_and_to_variable():
+ variable = factory.build_variable_from_mapping(
+ {
+ 'id': str(uuid4()),
+ 'name': 'name',
+ 'value_type': SegmentType.OBJECT,
+ 'value': {
+ 'key': {
+ 'key': 'value',
+ }
+ },
+ }
+ )
+
+ conversation_variable = ConversationVariable.from_variable(
+ app_id='app_id', conversation_id='conversation_id', variable=variable
+ )
+
+ assert conversation_variable.to_variable() == variable
diff --git a/api/tests/unit_tests/utils/position_helper/test_position_helper.py b/api/tests/unit_tests/utils/position_helper/test_position_helper.py
index 22373199043d6c..eefe374df0762e 100644
--- a/api/tests/unit_tests/utils/position_helper/test_position_helper.py
+++ b/api/tests/unit_tests/utils/position_helper/test_position_helper.py
@@ -2,7 +2,7 @@
import pytest
-from core.helper.position_helper import get_position_map
+from core.helper.position_helper import get_position_map, sort_and_filter_position_map
@pytest.fixture
@@ -53,3 +53,47 @@ def test_position_helper_with_all_commented(prepare_empty_commented_positions_ya
folder_path=prepare_empty_commented_positions_yaml,
file_name='example_positions_all_commented.yaml')
assert position_map == {}
+
+
+def test_excluded_position_map(prepare_example_positions_yaml):
+ position_map = get_position_map(
+ folder_path=prepare_example_positions_yaml,
+ file_name='example_positions.yaml'
+ )
+ pin_list = ['forth', 'first']
+ include_list = []
+ exclude_list = ['9999999999999']
+ sorted_filtered_position_map = sort_and_filter_position_map(
+ original_position_map=position_map,
+ pin_list=pin_list,
+ include_list=include_list,
+ exclude_list=exclude_list
+ )
+ assert sorted_filtered_position_map == {
+ 'forth': 0,
+ 'first': 1,
+ 'second': 2,
+ 'third': 3,
+ }
+
+
+def test_included_position_map(prepare_example_positions_yaml):
+ position_map = get_position_map(
+ folder_path=prepare_example_positions_yaml,
+ file_name='example_positions.yaml'
+ )
+ pin_list = ['second', 'first']
+ include_list = ['first', 'second', 'third', 'forth']
+ exclude_list = []
+ sorted_filtered_position_map = sort_and_filter_position_map(
+ original_position_map=position_map,
+ pin_list=pin_list,
+ include_list=include_list,
+ exclude_list=exclude_list
+ )
+ assert sorted_filtered_position_map == {
+ 'second': 0,
+ 'first': 1,
+ 'third': 2,
+ 'forth': 3,
+ }
diff --git a/dev/pytest/pytest_vdb.sh b/dev/pytest/pytest_vdb.sh
index c954c528fb2499..0b23200dc33b0e 100755
--- a/dev/pytest/pytest_vdb.sh
+++ b/dev/pytest/pytest_vdb.sh
@@ -7,4 +7,5 @@ pytest api/tests/integration_tests/vdb/chroma \
api/tests/integration_tests/vdb/pgvector \
api/tests/integration_tests/vdb/qdrant \
api/tests/integration_tests/vdb/weaviate \
+ api/tests/integration_tests/vdb/elasticsearch \
api/tests/integration_tests/vdb/test_vector_store.py
\ No newline at end of file
diff --git a/dev/reformat b/dev/reformat
index f50ccb04c44ed1..ad83e897d978bd 100755
--- a/dev/reformat
+++ b/dev/reformat
@@ -11,5 +11,8 @@ fi
# run ruff linter
ruff check --fix ./api
+# run ruff formatter
+ruff format ./api
+
# run dotenv-linter linter
dotenv-linter ./api/.env.example ./web/.env.example
diff --git a/docker-legacy/docker-compose.yaml b/docker-legacy/docker-compose.yaml
index 807946f3fea820..aed2586053ceb0 100644
--- a/docker-legacy/docker-compose.yaml
+++ b/docker-legacy/docker-compose.yaml
@@ -2,7 +2,7 @@ version: '3'
services:
# API service
api:
- image: langgenius/dify-api:0.6.16
+ image: langgenius/dify-api:0.7.0
restart: always
environment:
# Startup mode, 'api' starts the API server.
@@ -169,6 +169,11 @@ services:
CHROMA_DATABASE: default_database
CHROMA_AUTH_PROVIDER: chromadb.auth.token_authn.TokenAuthClientProvider
CHROMA_AUTH_CREDENTIALS: xxxxxx
+ # ElasticSearch Config
+ ELASTICSEARCH_HOST: 127.0.0.1
+ ELASTICSEARCH_PORT: 9200
+ ELASTICSEARCH_USERNAME: elastic
+ ELASTICSEARCH_PASSWORD: elastic
# Mail configuration, support: resend, smtp
MAIL_TYPE: ''
# default send from email address, if not specified
@@ -224,7 +229,7 @@ services:
# worker service
# The Celery worker for processing the queue.
worker:
- image: langgenius/dify-api:0.6.16
+ image: langgenius/dify-api:0.7.0
restart: always
environment:
CONSOLE_WEB_URL: ''
@@ -371,6 +376,11 @@ services:
CHROMA_DATABASE: default_database
CHROMA_AUTH_PROVIDER: chromadb.auth.token_authn.TokenAuthClientProvider
CHROMA_AUTH_CREDENTIALS: xxxxxx
+ # ElasticSearch Config
+ ELASTICSEARCH_HOST: 127.0.0.1
+ ELASTICSEARCH_PORT: 9200
+ ELASTICSEARCH_USERNAME: elastic
+ ELASTICSEARCH_PASSWORD: elastic
# Notion import configuration, support public and internal
NOTION_INTEGRATION_TYPE: public
NOTION_CLIENT_SECRET: you-client-secret
@@ -390,7 +400,7 @@ services:
# Frontend web application.
web:
- image: langgenius/dify-web:0.6.16
+ image: langgenius/dify-web:0.7.0
restart: always
environment:
# The base URL of console application api server, refers to the Console base URL of WEB service if console domain is
diff --git a/docker/.env.example b/docker/.env.example
index c463bf1bec8f41..5898d3e62a8d16 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -173,6 +173,36 @@ SQLALCHEMY_POOL_RECYCLE=3600
# Whether to print SQL, default is false.
SQLALCHEMY_ECHO=false
+# Maximum number of connections to the database
+# Default is 100
+#
+# Reference: https://www.postgresql.org/docs/current/runtime-config-connection.html#GUC-MAX-CONNECTIONS
+POSTGRES_MAX_CONNECTIONS=100
+
+# Sets the amount of shared memory used for postgres's shared buffers.
+# Default is 128MB
+# Recommended value: 25% of available memory
+# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-SHARED-BUFFERS
+POSTGRES_SHARED_BUFFERS=128MB
+
+# Sets the amount of memory used by each database worker for working space.
+# Default is 4MB
+#
+# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-WORK-MEM
+POSTGRES_WORK_MEM=4MB
+
+# Sets the amount of memory reserved for maintenance activities.
+# Default is 64MB
+#
+# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-MAINTENANCE-WORK-MEM
+POSTGRES_MAINTENANCE_WORK_MEM=64MB
+
+# Sets the planner's assumption about the effective cache size.
+# Default is 4096MB
+#
+# Reference: https://www.postgresql.org/docs/current/runtime-config-query.html#GUC-EFFECTIVE-CACHE-SIZE
+POSTGRES_EFFECTIVE_CACHE_SIZE=4096MB
+
# ------------------------------
# Redis Configuration
# This Redis configuration is used for caching and for pub/sub during conversation.
@@ -627,6 +657,7 @@ NGINX_KEEPALIVE_TIMEOUT=65
NGINX_PROXY_READ_TIMEOUT=3600s
NGINX_PROXY_SEND_TIMEOUT=3600s
+# Set true to accept requests for /.well-known/acme-challenge/
NGINX_ENABLE_CERTBOT_CHALLENGE=false
# ------------------------------
@@ -664,3 +695,22 @@ COMPOSE_PROFILES=${VECTOR_STORE:-weaviate}
# ------------------------------
EXPOSE_NGINX_PORT=80
EXPOSE_NGINX_SSL_PORT=443
+
+# ----------------------------------------------------------------------------
+# ModelProvider & Tool Position Configuration
+# Used to specify the model providers and tools that can be used in the app.
+# ----------------------------------------------------------------------------
+
+# Pin, include, and exclude tools
+# Use comma-separated values with no spaces between items.
+# Example: POSITION_TOOL_PINS=bing,google
+POSITION_TOOL_PINS=
+POSITION_TOOL_INCLUDES=
+POSITION_TOOL_EXCLUDES=
+
+# Pin, include, and exclude model providers
+# Use comma-separated values with no spaces between items.
+# Example: POSITION_PROVIDER_PINS=openai,openllm
+POSITION_PROVIDER_PINS=
+POSITION_PROVIDER_INCLUDES=
+POSITION_PROVIDER_EXCLUDES=
\ No newline at end of file
diff --git a/docker/docker-compose.middleware.yaml b/docker/docker-compose.middleware.yaml
index 6ab003ceabe5d5..3aa84d009e2397 100644
--- a/docker/docker-compose.middleware.yaml
+++ b/docker/docker-compose.middleware.yaml
@@ -9,6 +9,12 @@ services:
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-difyai123456}
POSTGRES_DB: ${POSTGRES_DB:-dify}
PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
+ command: >
+ postgres -c 'max_connections=${POSTGRES_MAX_CONNECTIONS:-100}'
+ -c 'shared_buffers=${POSTGRES_SHARED_BUFFERS:-128MB}'
+ -c 'work_mem=${POSTGRES_WORK_MEM:-4MB}'
+ -c 'maintenance_work_mem=${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}'
+ -c 'effective_cache_size=${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}'
volumes:
- ./volumes/db/data:/var/lib/postgresql/data
ports:
diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml
index 1864542668147e..f3151bbc2ad2af 100644
--- a/docker/docker-compose.yaml
+++ b/docker/docker-compose.yaml
@@ -35,6 +35,11 @@ x-shared-env: &shared-api-worker-env
SQLALCHEMY_POOL_SIZE: ${SQLALCHEMY_POOL_SIZE:-30}
SQLALCHEMY_POOL_RECYCLE: ${SQLALCHEMY_POOL_RECYCLE:-3600}
SQLALCHEMY_ECHO: ${SQLALCHEMY_ECHO:-false}
+ POSTGRES_MAX_CONNECTIONS: ${POSTGRES_MAX_CONNECTIONS:-100}
+ POSTGRES_SHARED_BUFFERS: ${POSTGRES_SHARED_BUFFERS:-128MB}
+ POSTGRES_WORK_MEM: ${POSTGRES_WORK_MEM:-4MB}
+ POSTGRES_MAINTENANCE_WORK_MEM: ${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}
+ POSTGRES_EFFECTIVE_CACHE_SIZE: ${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}
REDIS_HOST: ${REDIS_HOST:-redis}
REDIS_PORT: ${REDIS_PORT:-6379}
REDIS_USERNAME: ${REDIS_USERNAME:-}
@@ -120,6 +125,10 @@ x-shared-env: &shared-api-worker-env
CHROMA_DATABASE: ${CHROMA_DATABASE:-default_database}
CHROMA_AUTH_PROVIDER: ${CHROMA_AUTH_PROVIDER:-chromadb.auth.token_authn.TokenAuthClientProvider}
CHROMA_AUTH_CREDENTIALS: ${CHROMA_AUTH_CREDENTIALS:-}
+ ELASTICSEARCH_HOST: ${ELASTICSEARCH_HOST:-127.0.0.1}
+ ELASTICSEARCH_PORT: ${ELASTICSEARCH_PORT:-9200}
+ ELASTICSEARCH_USERNAME: ${ELASTICSEARCH_USERNAME:-elastic}
+ ELASTICSEARCH_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic}
# AnalyticDB configuration
ANALYTICDB_KEY_ID: ${ANALYTICDB_KEY_ID:-}
ANALYTICDB_KEY_SECRET: ${ANALYTICDB_KEY_SECRET:-}
@@ -182,7 +191,7 @@ x-shared-env: &shared-api-worker-env
services:
# API service
api:
- image: langgenius/dify-api:0.6.16
+ image: langgenius/dify-api:0.7.0
restart: always
environment:
# Use the shared environment variables.
@@ -202,7 +211,7 @@ services:
# worker service
# The Celery worker for processing the queue.
worker:
- image: langgenius/dify-api:0.6.16
+ image: langgenius/dify-api:0.7.0
restart: always
environment:
# Use the shared environment variables.
@@ -221,12 +230,13 @@ services:
# Frontend web application.
web:
- image: langgenius/dify-web:0.6.16
+ image: langgenius/dify-web:0.7.0
restart: always
environment:
CONSOLE_API_URL: ${CONSOLE_API_URL:-}
APP_API_URL: ${APP_API_URL:-}
SENTRY_DSN: ${WEB_SENTRY_DSN:-}
+ NEXT_TELEMETRY_DISABLED: ${NEXT_TELEMETRY_DISABLED:-0}
# The postgres database.
db:
@@ -237,6 +247,12 @@ services:
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-difyai123456}
POSTGRES_DB: ${POSTGRES_DB:-dify}
PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
+ command: >
+ postgres -c 'max_connections=${POSTGRES_MAX_CONNECTIONS:-100}'
+ -c 'shared_buffers=${POSTGRES_SHARED_BUFFERS:-128MB}'
+ -c 'work_mem=${POSTGRES_WORK_MEM:-4MB}'
+ -c 'maintenance_work_mem=${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}'
+ -c 'effective_cache_size=${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}'
volumes:
- ./volumes/db/data:/var/lib/postgresql/data
healthcheck:
@@ -571,7 +587,7 @@ services:
# MyScale vector database
myscale:
container_name: myscale
- image: myscale/myscaledb:1.6
+ image: myscale/myscaledb:1.6.4
profiles:
- myscale
restart: always
@@ -583,6 +599,27 @@ services:
ports:
- "${MYSCALE_PORT:-8123}:${MYSCALE_PORT:-8123}"
+ elasticsearch:
+ image: docker.elastic.co/elasticsearch/elasticsearch:8.14.3
+ container_name: elasticsearch
+ profiles:
+ - elasticsearch
+ restart: always
+ environment:
+ - "ELASTIC_PASSWORD=${ELASTICSEARCH_PASSWORD:-elastic}"
+ - "cluster.name=dify-es-cluster"
+ - "node.name=dify-es0"
+ - "discovery.type=single-node"
+ - "xpack.security.http.ssl.enabled=false"
+ - "xpack.license.self_generated.type=trial"
+ ports:
+ - "${ELASTICSEARCH_PORT:-9200}:9200"
+ healthcheck:
+ test: ["CMD", "curl", "-s", "http://localhost:9200/_cluster/health?pretty"]
+ interval: 30s
+ timeout: 10s
+ retries: 50
+
# unstructured .
# (if used, you need to set ETL_TYPE to Unstructured in the api & worker service.)
unstructured:
diff --git a/docker/middleware.env.example b/docker/middleware.env.example
index 750dcfe9502f11..04d0fb5ed30cf5 100644
--- a/docker/middleware.env.example
+++ b/docker/middleware.env.example
@@ -9,6 +9,35 @@ POSTGRES_DB=dify
# postgres data directory
PGDATA=/var/lib/postgresql/data/pgdata
+# Maximum number of connections to the database
+# Default is 100
+#
+# Reference: https://www.postgresql.org/docs/current/runtime-config-connection.html#GUC-MAX-CONNECTIONS
+POSTGRES_MAX_CONNECTIONS=100
+
+# Sets the amount of shared memory used for postgres's shared buffers.
+# Default is 128MB
+# Recommended value: 25% of available memory
+# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-SHARED-BUFFERS
+POSTGRES_SHARED_BUFFERS=128MB
+
+# Sets the amount of memory used by each database worker for working space.
+# Default is 4MB
+#
+# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-WORK-MEM
+POSTGRES_WORK_MEM=4MB
+
+# Sets the amount of memory reserved for maintenance activities.
+# Default is 64MB
+#
+# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-MAINTENANCE-WORK-MEM
+POSTGRES_MAINTENANCE_WORK_MEM=64MB
+
+# Sets the planner's assumption about the effective cache size.
+# Default is 4096MB
+#
+# Reference: https://www.postgresql.org/docs/current/runtime-config-query.html#GUC-EFFECTIVE-CACHE-SIZE
+POSTGRES_EFFECTIVE_CACHE_SIZE=4096MB
# ------------------------------
# Environment Variables for sandbox Service
diff --git a/web/.env.example b/web/.env.example
index 653913033d8d05..439092c20e0a0e 100644
--- a/web/.env.example
+++ b/web/.env.example
@@ -13,3 +13,6 @@ NEXT_PUBLIC_PUBLIC_API_PREFIX=http://localhost:5001/api
# SENTRY
NEXT_PUBLIC_SENTRY_DSN=
+
+# Disable Next.js Telemetry (https://nextjs.org/telemetry)
+NEXT_TELEMETRY_DISABLED=1
\ No newline at end of file
diff --git a/web/Dockerfile b/web/Dockerfile
index 56957f0927010f..48bdb2301ad206 100644
--- a/web/Dockerfile
+++ b/web/Dockerfile
@@ -39,6 +39,7 @@ ENV DEPLOY_ENV=PRODUCTION
ENV CONSOLE_API_URL=http://127.0.0.1:5001
ENV APP_API_URL=http://127.0.0.1:5001
ENV PORT=3000
+ENV NEXT_TELEMETRY_DISABLED=1
# set timezone
ENV TZ=UTC
diff --git a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/panel.tsx b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/panel.tsx
index 88c37d0b125f72..bc724c1449af64 100644
--- a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/panel.tsx
+++ b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/panel.tsx
@@ -117,7 +117,6 @@ const Panel: FC = () => {
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [])
- const [isFold, setFold] = useState(false)
const [controlShowPopup, setControlShowPopup] = useState(0)
const showPopup = useCallback(() => {
setControlShowPopup(Date.now())
diff --git a/web/app/(commonLayout)/datasets/template/template.en.mdx b/web/app/(commonLayout)/datasets/template/template.en.mdx
index 36395d391de1b3..44c5964d77736b 100644
--- a/web/app/(commonLayout)/datasets/template/template.en.mdx
+++ b/web/app/(commonLayout)/datasets/template/template.en.mdx
@@ -922,6 +922,9 @@ import { Row, Col, Properties, Property, Heading, SubProperty, Paragraph } from
Knowledge ID
+
+ Document ID
+
Document Segment ID
@@ -965,6 +968,9 @@ import { Row, Col, Properties, Property, Heading, SubProperty, Paragraph } from
Knowledge ID
+
+ Document ID
+
Document Segment ID
diff --git a/web/app/(commonLayout)/datasets/template/template.zh.mdx b/web/app/(commonLayout)/datasets/template/template.zh.mdx
index a624c0594feffb..9f79b0f900287d 100644
--- a/web/app/(commonLayout)/datasets/template/template.zh.mdx
+++ b/web/app/(commonLayout)/datasets/template/template.zh.mdx
@@ -922,6 +922,9 @@ import { Row, Col, Properties, Property, Heading, SubProperty, Paragraph } from
知识库 ID
+
+ 文档 ID
+
文档分段ID
@@ -965,6 +968,9 @@ import { Row, Col, Properties, Property, Heading, SubProperty, Paragraph } from
知识库 ID
+
+ 文档 ID
+
文档分段ID
diff --git a/web/app/components/app/annotation/edit-annotation-modal/edit-item/index.tsx b/web/app/components/app/annotation/edit-annotation-modal/edit-item/index.tsx
index 63788447de2a89..8e88a4b5f335ad 100644
--- a/web/app/components/app/annotation/edit-annotation-modal/edit-item/index.tsx
+++ b/web/app/components/app/annotation/edit-annotation-modal/edit-item/index.tsx
@@ -79,7 +79,7 @@ const EditItem: FC = ({
{!readonly && (
{
+ onClick={() => {
setIsEdit(true)
}}
>
diff --git a/web/app/components/app/configuration/config-prompt/confirm-add-var/index.tsx b/web/app/components/app/configuration/config-prompt/confirm-add-var/index.tsx
index bfe51379655c0d..f08f2ffc6968e7 100644
--- a/web/app/components/app/configuration/config-prompt/confirm-add-var/index.tsx
+++ b/web/app/components/app/configuration/config-prompt/confirm-add-var/index.tsx
@@ -24,7 +24,7 @@ const ConfirmAddVar: FC = ({
varNameArr,
onConfrim,
onCancel,
- onHide,
+ // onHide,
}) => {
const { t } = useTranslation()
const mainContentRef = useRef(null)
diff --git a/web/app/components/app/configuration/config-prompt/simple-prompt-input.tsx b/web/app/components/app/configuration/config-prompt/simple-prompt-input.tsx
index 2811c4240238ec..adcfcdd1261f9f 100644
--- a/web/app/components/app/configuration/config-prompt/simple-prompt-input.tsx
+++ b/web/app/components/app/configuration/config-prompt/simple-prompt-input.tsx
@@ -69,7 +69,6 @@ const Prompt: FC = ({
hasSetBlockStatus,
showSelectDataSet,
externalDataToolsConfig,
- isAgent,
} = useContext(ConfigContext)
const { notify } = useToastContext()
const { setShowExternalDataToolModal } = useModalContext()
@@ -166,7 +165,7 @@ const Prompt: FC = ({
)}
- {!isAgent && !readonly && !isMobile && (
+ {!readonly && !isMobile && (
)}
diff --git a/web/app/components/app/log/list.tsx b/web/app/components/app/log/list.tsx
index 00d7ffb0fbb206..646ae8011645d2 100644
--- a/web/app/components/app/log/list.tsx
+++ b/web/app/components/app/log/list.tsx
@@ -126,6 +126,7 @@ const getFormattedChatList = (messages: ChatMessage[], conversationId: string, t
tokens: item.answer_tokens + item.message_tokens,
latency: item.provider_response_latency.toFixed(2),
},
+ citation: item.metadata?.retriever_resources,
annotation: (() => {
if (item.annotation_hit_history) {
return {
diff --git a/web/app/components/app/overview/appCard.tsx b/web/app/components/app/overview/appCard.tsx
index ccf1f8ef1a5fe9..ea0b793857d4d3 100644
--- a/web/app/components/app/overview/appCard.tsx
+++ b/web/app/components/app/overview/appCard.tsx
@@ -53,7 +53,7 @@ function AppCard({
}: IAppCardProps) {
const router = useRouter()
const pathname = usePathname()
- const { currentWorkspace, isCurrentWorkspaceManager, isCurrentWorkspaceEditor } = useAppContext()
+ const { isCurrentWorkspaceManager, isCurrentWorkspaceEditor } = useAppContext()
const [showSettingsModal, setShowSettingsModal] = useState(false)
const [showEmbedded, setShowEmbedded] = useState(false)
const [showCustomizeModal, setShowCustomizeModal] = useState(false)
diff --git a/web/app/components/base/agent-log-modal/result.tsx b/web/app/components/base/agent-log-modal/result.tsx
index 691a611f3696f1..7cba63ba9598ea 100644
--- a/web/app/components/base/agent-log-modal/result.tsx
+++ b/web/app/components/base/agent-log-modal/result.tsx
@@ -21,7 +21,6 @@ type ResultPanelProps = {
}
const ResultPanel: FC = ({
- status,
elapsed_time,
total_tokens,
error,
diff --git a/web/app/components/base/badge.tsx b/web/app/components/base/badge.tsx
index 3e5414fa2cba81..c3300a1e67e590 100644
--- a/web/app/components/base/badge.tsx
+++ b/web/app/components/base/badge.tsx
@@ -4,16 +4,19 @@ import cn from '@/utils/classnames'
type BadgeProps = {
className?: string
text: string
+ uppercase?: boolean
}
const Badge = ({
className,
text,
+ uppercase = true,
}: BadgeProps) => {
return (
diff --git a/web/app/components/base/block-input/index.tsx b/web/app/components/base/block-input/index.tsx
index f2b6b5d6dc174c..79ff646bd12039 100644
--- a/web/app/components/base/block-input/index.tsx
+++ b/web/app/components/base/block-input/index.tsx
@@ -49,8 +49,6 @@ const BlockInput: FC
= ({
setCurrentValue(value)
}, [value])
- const isContentChanged = value !== currentValue
-
const contentEditableRef = useRef(null)
const [isEditing, setIsEditing] = useState(false)
useEffect(() => {
diff --git a/web/app/components/base/chat/chat-with-history/hooks.tsx b/web/app/components/base/chat/chat-with-history/hooks.tsx
index ab8b3648e7dae9..3fa301d268744d 100644
--- a/web/app/components/base/chat/chat-with-history/hooks.tsx
+++ b/web/app/components/base/chat/chat-with-history/hooks.tsx
@@ -37,11 +37,14 @@ import type {
import { addFileInfos, sortAgentSorts } from '@/app/components/tools/utils'
import { useToastContext } from '@/app/components/base/toast'
import { changeLanguage } from '@/i18n/i18next-config'
+import { useAppFavicon } from '@/hooks/use-app-favicon'
export const useChatWithHistory = (installedAppInfo?: InstalledApp) => {
const isInstalledApp = useMemo(() => !!installedAppInfo, [installedAppInfo])
const { data: appInfo, isLoading: appInfoLoading, error: appInfoError } = useSWR(installedAppInfo ? null : 'appInfo', fetchAppInfo)
+ useAppFavicon(!installedAppInfo, appInfo?.site.icon, appInfo?.site.icon_background)
+
const appData = useMemo(() => {
if (isInstalledApp) {
const { id, app } = installedAppInfo!
diff --git a/web/app/components/base/chat/chat/thought/index.tsx b/web/app/components/base/chat/chat/thought/index.tsx
index 10bc8394e568e8..768f432e853d84 100644
--- a/web/app/components/base/chat/chat/thought/index.tsx
+++ b/web/app/components/base/chat/chat/thought/index.tsx
@@ -1,14 +1,10 @@
'use client'
import type { FC } from 'react'
import React from 'react'
-import { useContext } from 'use-context-selector'
import type { ThoughtItem, ToolInfoInThought } from '../type'
import Tool from '@/app/components/base/chat/chat/thought/tool'
import type { Emoji } from '@/app/components/tools/types'
-import I18n from '@/context/i18n'
-import { getLanguage } from '@/i18n/language'
-
export type IThoughtProps = {
thought: ThoughtItem
allToolIcons: Record
@@ -31,9 +27,6 @@ const Thought: FC = ({
allToolIcons,
isFinished,
}) => {
- const { locale } = useContext(I18n)
- const language = getLanguage(locale)
-
const [toolNames, isValueArray]: [string[], boolean] = (() => {
try {
if (Array.isArray(JSON.parse(thought.tool)))
diff --git a/web/app/components/base/chat/chat/type.ts b/web/app/components/base/chat/chat/type.ts
index d1c2839e09194b..16ccff4d4d2239 100644
--- a/web/app/components/base/chat/chat/type.ts
+++ b/web/app/components/base/chat/chat/type.ts
@@ -13,8 +13,14 @@ export type Feedbacktype = {
content?: string | null
}
-export type FeedbackFunc = (messageId: string, feedback: Feedbacktype) => Promise
-export type SubmitAnnotationFunc = (messageId: string, content: string) => Promise
+export type FeedbackFunc = (
+ messageId: string,
+ feedback: Feedbacktype
+) => Promise
+export type SubmitAnnotationFunc = (
+ messageId: string,
+ content: string
+) => Promise
export type DisplayScene = 'web' | 'console'
@@ -91,20 +97,22 @@ export type IChatItem = {
input?: any
}
-export type MessageEnd = {
- id: string
- metadata: {
- retriever_resources?: CitationItem[]
- annotation_reply: {
+export type Metadata = {
+ retriever_resources?: CitationItem[]
+ annotation_reply: {
+ id: string
+ account: {
id: string
- account: {
- id: string
- name: string
- }
+ name: string
}
}
}
+export type MessageEnd = {
+ id: string
+ metadata: Metadata
+}
+
export type MessageReplace = {
id: string
task_id: string
diff --git a/web/app/components/base/confirm/index.tsx b/web/app/components/base/confirm/index.tsx
index 137687b4bd3b73..813254cb3f3ba9 100644
--- a/web/app/components/base/confirm/index.tsx
+++ b/web/app/components/base/confirm/index.tsx
@@ -89,7 +89,7 @@ function Confirm({
{title}
-
{content}
+
{content}
{showCancel &&
{cancelTxt} }
diff --git a/web/app/components/base/emoji-picker/index.tsx b/web/app/components/base/emoji-picker/index.tsx
index f861bcb20c680b..8840f47950c75d 100644
--- a/web/app/components/base/emoji-picker/index.tsx
+++ b/web/app/components/base/emoji-picker/index.tsx
@@ -3,8 +3,8 @@
import type { ChangeEvent, FC } from 'react'
import React, { useState } from 'react'
import data from '@emoji-mart/data'
-import type { Emoji, EmojiMartData } from '@emoji-mart/data'
-import { SearchIndex, init } from 'emoji-mart'
+import type { EmojiMartData } from '@emoji-mart/data'
+import { init } from 'emoji-mart'
import {
MagnifyingGlassIcon,
} from '@heroicons/react/24/outline'
@@ -13,8 +13,8 @@ import s from './style.module.css'
import cn from '@/utils/classnames'
import Divider from '@/app/components/base/divider'
import Button from '@/app/components/base/button'
-
import Modal from '@/app/components/base/modal'
+import { searchEmoji } from '@/utils/emoji'
declare global {
namespace JSX {
@@ -30,15 +30,6 @@ declare global {
init({ data })
-async function search(value: string) {
- const emojis: Emoji[] = await SearchIndex.search(value) || []
-
- const results = emojis.map((emoji) => {
- return emoji.skins[0].native
- })
- return results
-}
-
const backgroundColors = [
'#FFEAD5',
'#E4FBCC',
@@ -105,7 +96,7 @@ const EmojiPicker: FC
= ({
}
else {
setIsSearching(true)
- const emojis = await search(e.target.value)
+ const emojis = await searchEmoji(e.target.value)
setSearchedEmojis(emojis)
}
}}
diff --git a/web/app/components/base/icons/assets/vender/line/others/bubble-x.svg b/web/app/components/base/icons/assets/vender/line/others/bubble-x.svg
new file mode 100644
index 00000000000000..6e4df5b9b843bb
--- /dev/null
+++ b/web/app/components/base/icons/assets/vender/line/others/bubble-x.svg
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
diff --git a/web/app/components/base/icons/assets/vender/line/others/long-arrow-left.svg b/web/app/components/base/icons/assets/vender/line/others/long-arrow-left.svg
new file mode 100644
index 00000000000000..7320664db67618
--- /dev/null
+++ b/web/app/components/base/icons/assets/vender/line/others/long-arrow-left.svg
@@ -0,0 +1,3 @@
+
+
+
diff --git a/web/app/components/base/icons/assets/vender/line/others/long-arrow-right.svg b/web/app/components/base/icons/assets/vender/line/others/long-arrow-right.svg
new file mode 100644
index 00000000000000..733785a276f88c
--- /dev/null
+++ b/web/app/components/base/icons/assets/vender/line/others/long-arrow-right.svg
@@ -0,0 +1,3 @@
+
+
+
diff --git a/web/app/components/base/icons/assets/vender/workflow/assigner.svg b/web/app/components/base/icons/assets/vender/workflow/assigner.svg
new file mode 100644
index 00000000000000..b37fbce52672ed
--- /dev/null
+++ b/web/app/components/base/icons/assets/vender/workflow/assigner.svg
@@ -0,0 +1,9 @@
+
+
+
+
+
+
+
+
+
diff --git a/web/app/components/base/icons/src/vender/line/others/BubbleX.json b/web/app/components/base/icons/src/vender/line/others/BubbleX.json
new file mode 100644
index 00000000000000..0cb5702c1f606b
--- /dev/null
+++ b/web/app/components/base/icons/src/vender/line/others/BubbleX.json
@@ -0,0 +1,57 @@
+{
+ "icon": {
+ "type": "element",
+ "isRootNode": true,
+ "name": "svg",
+ "attributes": {
+ "width": "16",
+ "height": "16",
+ "viewBox": "0 0 16 16",
+ "fill": "none",
+ "xmlns": "http://www.w3.org/2000/svg"
+ },
+ "children": [
+ {
+ "type": "element",
+ "name": "g",
+ "attributes": {
+ "id": "Icon L"
+ },
+ "children": [
+ {
+ "type": "element",
+ "name": "g",
+ "attributes": {
+ "id": "Vector"
+ },
+ "children": [
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "fill-rule": "evenodd",
+ "clip-rule": "evenodd",
+ "d": "M3.33463 3.33333C2.96643 3.33333 2.66796 3.63181 2.66796 4V10.6667C2.66796 11.0349 2.96643 11.3333 3.33463 11.3333H4.66796C5.03615 11.3333 5.33463 11.6318 5.33463 12V12.8225L7.65833 11.4283C7.76194 11.3662 7.8805 11.3333 8.00132 11.3333H12.0013C12.3695 11.3333 12.668 11.0349 12.668 10.6667C12.668 10.2985 12.9665 10 13.3347 10C13.7028 10 14.0013 10.2985 14.0013 10.6667C14.0013 11.7713 13.1058 12.6667 12.0013 12.6667H8.18598L5.01095 14.5717C4.805 14.6952 4.5485 14.6985 4.33949 14.5801C4.13049 14.4618 4.00129 14.2402 4.00129 14V12.6667H3.33463C2.23006 12.6667 1.33463 11.7713 1.33463 10.6667V4C1.33463 2.89543 2.23006 2 3.33463 2H6.66798C7.03617 2 7.33464 2.29848 7.33464 2.66667C7.33464 3.03486 7.03617 3.33333 6.66798 3.33333H3.33463Z",
+ "fill": "currentColor"
+ },
+ "children": []
+ },
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "fill-rule": "evenodd",
+ "clip-rule": "evenodd",
+ "d": "M8.74113 2.66667C8.74113 2.29848 9.03961 2 9.4078 2H10.331C10.9721 2 11.5177 2.43571 11.6859 3.04075L11.933 3.93004L12.8986 2.77189C13.3045 2.28508 13.9018 2 14.536 2H14.5954C14.9636 2 15.2621 2.29848 15.2621 2.66667C15.2621 3.03486 14.9636 3.33333 14.5954 3.33333H14.536C14.3048 3.33333 14.08 3.43702 13.9227 3.6257L12.367 5.49165L12.8609 7.2689C12.8746 7.31803 12.9105 7.33333 12.9312 7.33333H13.8543C14.2225 7.33333 14.521 7.63181 14.521 8C14.521 8.36819 14.2225 8.66667 13.8543 8.66667H12.9312C12.29 8.66667 11.7444 8.23095 11.5763 7.62591L11.3291 6.73654L10.3634 7.89478C9.95758 8.38159 9.36022 8.66667 8.72604 8.66667H8.66666C8.29847 8.66667 7.99999 8.36819 7.99999 8C7.99999 7.63181 8.29847 7.33333 8.66666 7.33333H8.72604C8.95723 7.33333 9.18204 7.22965 9.33935 7.04096L10.8951 5.17493L10.4012 3.39777C10.3876 3.34863 10.3516 3.33333 10.331 3.33333H9.4078C9.03961 3.33333 8.74113 3.03486 8.74113 2.66667Z",
+ "fill": "currentColor"
+ },
+ "children": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "name": "BubbleX"
+}
\ No newline at end of file
diff --git a/web/app/components/base/icons/src/vender/line/others/BubbleX.tsx b/web/app/components/base/icons/src/vender/line/others/BubbleX.tsx
new file mode 100644
index 00000000000000..7d78bd33c7a92a
--- /dev/null
+++ b/web/app/components/base/icons/src/vender/line/others/BubbleX.tsx
@@ -0,0 +1,16 @@
+// GENERATE BY script
+// DON NOT EDIT IT MANUALLY
+
+import * as React from 'react'
+import data from './BubbleX.json'
+import IconBase from '@/app/components/base/icons/IconBase'
+import type { IconBaseProps, IconData } from '@/app/components/base/icons/IconBase'
+
+const Icon = React.forwardRef, Omit>((
+ props,
+ ref,
+) => )
+
+Icon.displayName = 'BubbleX'
+
+export default Icon
diff --git a/web/app/components/base/icons/src/vender/line/others/LongArrowLeft.json b/web/app/components/base/icons/src/vender/line/others/LongArrowLeft.json
new file mode 100644
index 00000000000000..d2646b10909f3a
--- /dev/null
+++ b/web/app/components/base/icons/src/vender/line/others/LongArrowLeft.json
@@ -0,0 +1,27 @@
+{
+ "icon": {
+ "type": "element",
+ "isRootNode": true,
+ "name": "svg",
+ "attributes": {
+ "width": "21",
+ "height": "8",
+ "viewBox": "0 0 21 8",
+ "fill": "none",
+ "xmlns": "http://www.w3.org/2000/svg"
+ },
+ "children": [
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "d": "M0.646446 3.64645C0.451185 3.84171 0.451185 4.15829 0.646446 4.35355L3.82843 7.53553C4.02369 7.7308 4.34027 7.7308 4.53553 7.53553C4.7308 7.34027 4.7308 7.02369 4.53553 6.82843L1.70711 4L4.53553 1.17157C4.7308 0.976311 4.7308 0.659728 4.53553 0.464466C4.34027 0.269204 4.02369 0.269204 3.82843 0.464466L0.646446 3.64645ZM21 3.5L1 3.5V4.5L21 4.5V3.5Z",
+ "fill": "currentColor",
+ "fill-opacity": "0.3"
+ },
+ "children": []
+ }
+ ]
+ },
+ "name": "LongArrowLeft"
+}
\ No newline at end of file
diff --git a/web/app/components/base/icons/src/vender/line/others/LongArrowLeft.tsx b/web/app/components/base/icons/src/vender/line/others/LongArrowLeft.tsx
new file mode 100644
index 00000000000000..930ced5360d798
--- /dev/null
+++ b/web/app/components/base/icons/src/vender/line/others/LongArrowLeft.tsx
@@ -0,0 +1,16 @@
+// GENERATE BY script
+// DON NOT EDIT IT MANUALLY
+
+import * as React from 'react'
+import data from './LongArrowLeft.json'
+import IconBase from '@/app/components/base/icons/IconBase'
+import type { IconBaseProps, IconData } from '@/app/components/base/icons/IconBase'
+
+const Icon = React.forwardRef, Omit>((
+ props,
+ ref,
+) => )
+
+Icon.displayName = 'LongArrowLeft'
+
+export default Icon
diff --git a/web/app/components/base/icons/src/vender/line/others/LongArrowRight.json b/web/app/components/base/icons/src/vender/line/others/LongArrowRight.json
new file mode 100644
index 00000000000000..7582b81568b3fe
--- /dev/null
+++ b/web/app/components/base/icons/src/vender/line/others/LongArrowRight.json
@@ -0,0 +1,27 @@
+{
+ "icon": {
+ "type": "element",
+ "isRootNode": true,
+ "name": "svg",
+ "attributes": {
+ "width": "26",
+ "height": "8",
+ "viewBox": "0 0 26 8",
+ "fill": "none",
+ "xmlns": "http://www.w3.org/2000/svg"
+ },
+ "children": [
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "d": "M25.3536 4.35355C25.5488 4.15829 25.5488 3.84171 25.3536 3.64644L22.1716 0.464465C21.9763 0.269202 21.6597 0.269202 21.4645 0.464465C21.2692 0.659727 21.2692 0.976309 21.4645 1.17157L24.2929 4L21.4645 6.82843C21.2692 7.02369 21.2692 7.34027 21.4645 7.53553C21.6597 7.73079 21.9763 7.73079 22.1716 7.53553L25.3536 4.35355ZM3.59058e-08 4.5L25 4.5L25 3.5L-3.59058e-08 3.5L3.59058e-08 4.5Z",
+ "fill": "currentColor",
+ "fill-opacity": "0.3"
+ },
+ "children": []
+ }
+ ]
+ },
+ "name": "LongArrowRight"
+}
\ No newline at end of file
diff --git a/web/app/components/base/icons/src/vender/line/others/LongArrowRight.tsx b/web/app/components/base/icons/src/vender/line/others/LongArrowRight.tsx
new file mode 100644
index 00000000000000..3c9084cada9c4f
--- /dev/null
+++ b/web/app/components/base/icons/src/vender/line/others/LongArrowRight.tsx
@@ -0,0 +1,16 @@
+// GENERATE BY script
+// DON NOT EDIT IT MANUALLY
+
+import * as React from 'react'
+import data from './LongArrowRight.json'
+import IconBase from '@/app/components/base/icons/IconBase'
+import type { IconBaseProps, IconData } from '@/app/components/base/icons/IconBase'
+
+const Icon = React.forwardRef, Omit>((
+ props,
+ ref,
+) => )
+
+Icon.displayName = 'LongArrowRight'
+
+export default Icon
diff --git a/web/app/components/base/icons/src/vender/line/others/index.ts b/web/app/components/base/icons/src/vender/line/others/index.ts
index 282a39499f3651..d54f31e4a9ce7a 100644
--- a/web/app/components/base/icons/src/vender/line/others/index.ts
+++ b/web/app/components/base/icons/src/vender/line/others/index.ts
@@ -1,8 +1,11 @@
export { default as Apps02 } from './Apps02'
+export { default as BubbleX } from './BubbleX'
export { default as Colors } from './Colors'
export { default as DragHandle } from './DragHandle'
export { default as Env } from './Env'
export { default as Exchange02 } from './Exchange02'
export { default as FileCode } from './FileCode'
export { default as Icon3Dots } from './Icon3Dots'
+export { default as LongArrowLeft } from './LongArrowLeft'
+export { default as LongArrowRight } from './LongArrowRight'
export { default as Tools } from './Tools'
diff --git a/web/app/components/base/icons/src/vender/workflow/Assigner.json b/web/app/components/base/icons/src/vender/workflow/Assigner.json
new file mode 100644
index 00000000000000..7106e5ad439179
--- /dev/null
+++ b/web/app/components/base/icons/src/vender/workflow/Assigner.json
@@ -0,0 +1,68 @@
+{
+ "icon": {
+ "type": "element",
+ "isRootNode": true,
+ "name": "svg",
+ "attributes": {
+ "width": "16",
+ "height": "16",
+ "viewBox": "0 0 16 16",
+ "fill": "none",
+ "xmlns": "http://www.w3.org/2000/svg"
+ },
+ "children": [
+ {
+ "type": "element",
+ "name": "g",
+ "attributes": {
+ "id": "variable assigner"
+ },
+ "children": [
+ {
+ "type": "element",
+ "name": "g",
+ "attributes": {
+ "id": "Vector"
+ },
+ "children": [
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "fill-rule": "evenodd",
+ "clip-rule": "evenodd",
+ "d": "M1.71438 4.42875C1.71438 3.22516 2.68954 2.25 3.89313 2.25C4.30734 2.25 4.64313 2.58579 4.64313 3C4.64313 3.41421 4.30734 3.75 3.89313 3.75C3.51796 3.75 3.21438 4.05359 3.21438 4.42875V7.28563C3.21438 7.48454 3.13536 7.6753 2.9947 7.81596L2.81066 8L2.9947 8.18404C3.13536 8.3247 3.21438 8.51546 3.21438 8.71437V11.5713C3.21438 11.9464 3.51796 12.25 3.89313 12.25C4.30734 12.25 4.64313 12.5858 4.64313 13C4.64313 13.4142 4.30734 13.75 3.89313 13.75C2.68954 13.75 1.71438 12.7748 1.71438 11.5713V9.02503L1.21967 8.53033C1.07902 8.38968 1 8.19891 1 8C1 7.80109 1.07902 7.61032 1.21967 7.46967L1.71438 6.97497V4.42875ZM11.3568 3C11.3568 2.58579 11.6925 2.25 12.1068 2.25C13.3103 2.25 14.2855 3.22516 14.2855 4.42875V6.97497L14.7802 7.46967C14.9209 7.61032 14.9999 7.80109 14.9999 8C14.9999 8.19891 14.9209 8.38968 14.7802 8.53033L14.2855 9.02503V11.5713C14.2855 12.7751 13.3095 13.75 12.1068 13.75C11.6925 13.75 11.3568 13.4142 11.3568 13C11.3568 12.5858 11.6925 12.25 12.1068 12.25C12.4815 12.25 12.7855 11.9462 12.7855 11.5713V8.71437C12.7855 8.51546 12.8645 8.3247 13.0052 8.18404L13.1892 8L13.0052 7.81596C12.8645 7.6753 12.7855 7.48454 12.7855 7.28563V4.42875C12.7855 4.05359 12.4819 3.75 12.1068 3.75C11.6925 3.75 11.3568 3.41421 11.3568 3Z",
+ "fill": "currentColor"
+ },
+ "children": []
+ },
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "fill-rule": "evenodd",
+ "clip-rule": "evenodd",
+ "d": "M5.25 6C5.25 5.58579 5.58579 5.25 6 5.25H10C10.4142 5.25 10.75 5.58579 10.75 6C10.75 6.41421 10.4142 6.75 10 6.75H6C5.58579 6.75 5.25 6.41421 5.25 6Z",
+ "fill": "currentColor"
+ },
+ "children": []
+ },
+ {
+ "type": "element",
+ "name": "path",
+ "attributes": {
+ "fill-rule": "evenodd",
+ "clip-rule": "evenodd",
+ "d": "M5.25 10C5.25 9.58579 5.58579 9.25 6 9.25H10C10.4142 9.25 10.75 9.58579 10.75 10C10.75 10.4142 10.4142 10.75 10 10.75H6C5.58579 10.75 5.25 10.4142 5.25 10Z",
+ "fill": "currentColor"
+ },
+ "children": []
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "name": "Assigner"
+}
\ No newline at end of file
diff --git a/web/app/components/base/icons/src/vender/workflow/Assigner.tsx b/web/app/components/base/icons/src/vender/workflow/Assigner.tsx
new file mode 100644
index 00000000000000..1cb7d692dd91a0
--- /dev/null
+++ b/web/app/components/base/icons/src/vender/workflow/Assigner.tsx
@@ -0,0 +1,16 @@
+// GENERATE BY script
+// DON NOT EDIT IT MANUALLY
+
+import * as React from 'react'
+import data from './Assigner.json'
+import IconBase from '@/app/components/base/icons/IconBase'
+import type { IconBaseProps, IconData } from '@/app/components/base/icons/IconBase'
+
+const Icon = React.forwardRef, Omit>((
+ props,
+ ref,
+) => )
+
+Icon.displayName = 'Assigner'
+
+export default Icon
diff --git a/web/app/components/base/icons/src/vender/workflow/index.ts b/web/app/components/base/icons/src/vender/workflow/index.ts
index 94e20ae6a9e1f3..a2563a6a36584f 100644
--- a/web/app/components/base/icons/src/vender/workflow/index.ts
+++ b/web/app/components/base/icons/src/vender/workflow/index.ts
@@ -1,4 +1,5 @@
export { default as Answer } from './Answer'
+export { default as Assigner } from './Assigner'
export { default as Code } from './Code'
export { default as End } from './End'
export { default as Home } from './Home'
diff --git a/web/app/components/base/input/index.tsx b/web/app/components/base/input/index.tsx
index 0fb34de2e8b369..5ab82494463698 100644
--- a/web/app/components/base/input/index.tsx
+++ b/web/app/components/base/input/index.tsx
@@ -2,7 +2,7 @@
import type { SVGProps } from 'react'
import React, { useState } from 'react'
import { useTranslation } from 'react-i18next'
-import s from './style.module.css'
+import cn from 'classnames'
type InputProps = {
placeholder?: string
@@ -27,10 +27,10 @@ const Input = ({ value, defaultValue, onChange, className = '', wrapperClassName
const { t } = useTranslation()
return (
- {showPrefix &&
{prefixIcon ?? } }
+ {showPrefix &&
{prefixIcon ?? } }
{
diff --git a/web/app/components/base/input/style.module.css b/web/app/components/base/input/style.module.css
deleted file mode 100644
index 5f2782777d6d31..00000000000000
--- a/web/app/components/base/input/style.module.css
+++ /dev/null
@@ -1,7 +0,0 @@
-.input {
- @apply inline-flex h-7 w-full py-1 px-2 rounded-lg text-xs leading-normal;
- @apply bg-gray-100 caret-primary-600 hover:bg-gray-100 focus:ring-1 focus:ring-inset focus:ring-gray-200 focus-visible:outline-none focus:bg-white placeholder:text-gray-400;
-}
-.prefix {
- @apply whitespace-nowrap absolute left-2 self-center
-}
diff --git a/web/app/components/base/markdown.tsx b/web/app/components/base/markdown.tsx
index 3adb4d75e1990c..af4b13ff70deaf 100644
--- a/web/app/components/base/markdown.tsx
+++ b/web/app/components/base/markdown.tsx
@@ -1,4 +1,5 @@
import ReactMarkdown from 'react-markdown'
+import ReactEcharts from 'echarts-for-react'
import 'katex/dist/katex.min.css'
import RemarkMath from 'remark-math'
import RemarkBreaks from 'remark-breaks'
@@ -13,6 +14,7 @@ import cn from '@/utils/classnames'
import CopyBtn from '@/app/components/base/copy-btn'
import SVGBtn from '@/app/components/base/svg'
import Flowchart from '@/app/components/base/mermaid'
+import ImageGallery from '@/app/components/base/image-gallery'
// Available language https://github.com/react-syntax-highlighter/react-syntax-highlighter/blob/master/AVAILABLE_LANGUAGES_HLJS.MD
const capitalizationLanguageNameMap: Record
= {
@@ -30,6 +32,7 @@ const capitalizationLanguageNameMap: Record = {
mermaid: 'Mermaid',
markdown: 'MarkDown',
makefile: 'MakeFile',
+ echarts: 'ECharts',
}
const getCorrectCapitalizationLanguageName = (language: string) => {
if (!language)
@@ -44,9 +47,9 @@ const getCorrectCapitalizationLanguageName = (language: string) => {
const preprocessLaTeX = (content: string) => {
if (typeof content !== 'string')
return content
- return content.replace(/\\\[(.*?)\\\]/gs, (_, equation) => `$$${equation}$$`)
- .replace(/\\\((.*?)\\\)/gs, (_, equation) => `$$${equation}$$`)
- .replace(/(^|[^\\])\$(.+?)\$/gs, (_, prefix, equation) => `${prefix}$${equation}$`)
+ return content.replace(/\\\[(.*?)\\\]/g, (_, equation) => `$$${equation}$$`)
+ .replace(/\\\((.*?)\\\)/g, (_, equation) => `$$${equation}$$`)
+ .replace(/(^|[^\\])\$(.+?)\$/g, (_, prefix, equation) => `${prefix}$${equation}$`)
}
export function PreCode(props: { children: any }) {
@@ -56,12 +59,6 @@ export function PreCode(props: { children: any }) {
{
- if (ref.current) {
- const code = ref.current.innerText
- // copyToClipboard(code);
- }
- }}
>
{props.children}
@@ -107,6 +104,14 @@ const CodeBlock: CodeComponent = memo(({ inline, className, children, ...props }
const match = /language-(\w+)/.exec(className || '')
const language = match?.[1]
const languageShowName = getCorrectCapitalizationLanguageName(language || '')
+ let chartData = JSON.parse(String('{"title":{"text":"Something went wrong."}}').replace(/\n$/, ''))
+ if (language === 'echarts') {
+ try {
+ chartData = JSON.parse(String(children).replace(/\n$/, ''))
+ }
+ catch (error) {
+ }
+ }
// Use `useMemo` to ensure that `SyntaxHighlighter` only re-renders when necessary
return useMemo(() => {
@@ -136,19 +141,25 @@ const CodeBlock: CodeComponent = memo(({ inline, className, children, ...props }
{(language === 'mermaid' && isSVG)
? ( )
- : (
- {String(children).replace(/\n$/, '')}
- )}
+ : (
+ (language === 'echarts')
+ ? (
+
)
+ : (
+ {String(children).replace(/\n$/, '')}
+ ))}
)
: (
@@ -156,7 +167,7 @@ const CodeBlock: CodeComponent = memo(({ inline, className, children, ...props }
{children}
)
- }, [children, className, inline, isSVG, language, languageShowName, match, props])
+ }, [chartData, children, className, inline, isSVG, language, languageShowName, match, props])
})
CodeBlock.displayName = 'CodeBlock'
@@ -172,17 +183,9 @@ export function Markdown(props: { content: string; className?: string }) {
]}
components={{
code: CodeBlock,
- img({ src, alt, ...props }) {
+ img({ src }) {
return (
- // eslint-disable-next-line @next/next/no-img-element
-
+
)
},
p: (paragraph) => {
@@ -192,14 +195,7 @@ export function Markdown(props: { content: string; className?: string }) {
return (
<>
- {/* eslint-disable-next-line @next/next/no-img-element */}
-
+
{paragraph.children.slice(1)}
>
)
diff --git a/web/app/components/base/prompt-editor/index.tsx b/web/app/components/base/prompt-editor/index.tsx
index da70d04ac1b105..deae6833cd69f0 100644
--- a/web/app/components/base/prompt-editor/index.tsx
+++ b/web/app/components/base/prompt-editor/index.tsx
@@ -144,7 +144,7 @@ const PromptEditor: FC
= ({
return (
-
+
}
placeholder={
}
diff --git a/web/app/components/base/prompt-editor/plugins/workflow-variable-block/component.tsx b/web/app/components/base/prompt-editor/plugins/workflow-variable-block/component.tsx
index e149f5b75a198a..39193fc31d6ae8 100644
--- a/web/app/components/base/prompt-editor/plugins/workflow-variable-block/component.tsx
+++ b/web/app/components/base/prompt-editor/plugins/workflow-variable-block/component.tsx
@@ -21,10 +21,10 @@ import {
} from './index'
import cn from '@/utils/classnames'
import { Variable02 } from '@/app/components/base/icons/src/vender/solid/development'
-import { Env } from '@/app/components/base/icons/src/vender/line/others'
+import { BubbleX, Env } from '@/app/components/base/icons/src/vender/line/others'
import { VarBlockIcon } from '@/app/components/workflow/block-icon'
import { Line3 } from '@/app/components/base/icons/src/public/common'
-import { isENV, isSystemVar } from '@/app/components/workflow/nodes/_base/components/variable/utils'
+import { isConversationVar, isENV, isSystemVar } from '@/app/components/workflow/nodes/_base/components/variable/utils'
import TooltipPlus from '@/app/components/base/tooltip-plus'
type WorkflowVariableBlockComponentProps = {
@@ -52,6 +52,7 @@ const WorkflowVariableBlockComponent = ({
const [localWorkflowNodesMap, setLocalWorkflowNodesMap] = useState
(workflowNodesMap)
const node = localWorkflowNodesMap![variables[0]]
const isEnv = isENV(variables)
+ const isChatVar = isConversationVar(variables)
useEffect(() => {
if (!editor.hasNodes([WorkflowVariableBlockNode]))
@@ -75,11 +76,11 @@ const WorkflowVariableBlockComponent = ({
className={cn(
'mx-0.5 relative group/wrap flex items-center h-[18px] pl-0.5 pr-[3px] rounded-[5px] border select-none',
isSelected ? ' border-[#84ADFF] bg-[#F5F8FF]' : ' border-black/5 bg-white',
- !node && !isEnv && '!border-[#F04438] !bg-[#FEF3F2]',
+ !node && !isEnv && !isChatVar && '!border-[#F04438] !bg-[#FEF3F2]',
)}
ref={ref}
>
- {!isEnv && (
+ {!isEnv && !isChatVar && (
{
node?.type && (
@@ -97,11 +98,12 @@ const WorkflowVariableBlockComponent = ({
)}
- {!isEnv &&
}
+ {!isEnv && !isChatVar &&
}
{isEnv &&
}
-
{varName}
+ {isChatVar &&
}
+
{varName}
{
- !node && !isEnv && (
+ !node && !isEnv && !isChatVar && (
)
}
@@ -109,7 +111,7 @@ const WorkflowVariableBlockComponent = ({
)
- if (!node && !isEnv) {
+ if (!node && !isEnv && !isChatVar) {
return (
{Item}
diff --git a/web/app/components/base/tooltip-plus/index.tsx b/web/app/components/base/tooltip-plus/index.tsx
index b833690469459c..1f8a091fa5c9d5 100644
--- a/web/app/components/base/tooltip-plus/index.tsx
+++ b/web/app/components/base/tooltip-plus/index.tsx
@@ -8,6 +8,7 @@ import { PortalToFollowElem, PortalToFollowElemContent, PortalToFollowElemTrigge
export type TooltipProps = {
position?: Placement
triggerMethod?: 'hover' | 'click'
+ disabled?: boolean
popupContent: React.ReactNode
children: React.ReactNode
hideArrow?: boolean
@@ -23,6 +24,7 @@ const arrow = (
const Tooltip: FC = ({
position = 'top',
triggerMethod = 'hover',
+ disabled = false,
popupContent,
children,
hideArrow,
@@ -67,7 +69,7 @@ const Tooltip: FC = ({
return (
{
const { t } = useTranslation()
- const { plan } = useProviderContext()
- const { total } = plan
return (
diff --git a/web/app/components/datasets/hit-testing/textarea.tsx b/web/app/components/datasets/hit-testing/textarea.tsx
index 5c146ae36873c9..1783acd65855fa 100644
--- a/web/app/components/datasets/hit-testing/textarea.tsx
+++ b/web/app/components/datasets/hit-testing/textarea.tsx
@@ -1,4 +1,3 @@
-import { useContext } from 'use-context-selector'
import { useTranslation } from 'react-i18next'
import Button from '../../base/button'
import Tag from '../../base/tag'
@@ -6,7 +5,6 @@ import Tooltip from '../../base/tooltip'
import { getIcon } from '../common/retrieval-method-info'
import s from './style.module.css'
import cn from '@/utils/classnames'
-import DatasetDetailContext from '@/context/dataset-detail'
import type { HitTestingResponse } from '@/models/datasets'
import { hitTesting } from '@/service/datasets'
import { asyncRunSafe } from '@/utils'
@@ -40,7 +38,6 @@ const TextAreaWithButton = ({
onSubmit: _onSubmit,
}: TextAreaWithButtonIProps) => {
const { t } = useTranslation()
- const { indexingTechnique } = useContext(DatasetDetailContext)
function handleTextChange(event: any) {
setText(event.target.value)
diff --git a/web/app/components/develop/template/template_advanced_chat.en.mdx b/web/app/components/develop/template/template_advanced_chat.en.mdx
index 8481b4db43e3c2..cec42a4e321cef 100644
--- a/web/app/components/develop/template/template_advanced_chat.en.mdx
+++ b/web/app/components/develop/template/template_advanced_chat.en.mdx
@@ -141,7 +141,7 @@ Chat applications support session persistence, allowing previous chat history to
- `event` (string) fixed to `workflow_started`
- `data` (object) detail
- `id` (string) Unique ID of workflow execution
- - `workflow_id` (string) ID of relatied workflow
+ - `workflow_id` (string) ID of related workflow
- `sequence_number` (int) Self-increasing serial number, self-increasing in the App, starting from 1
- `created_at` (timestamp) Creation timestamp, e.g., 1705395332
- `event: node_started` node execution started
@@ -185,7 +185,7 @@ Chat applications support session persistence, allowing previous chat history to
- `event` (string) fixed to `workflow_finished`
- `data` (object) detail
- `id` (string) ID of workflow execution
- - `workflow_id` (string) ID of relatied workflow
+ - `workflow_id` (string) ID of related workflow
- `status` (string) status of execution, `running` / `succeeded` / `failed` / `stopped`
- `outputs` (json) Optional content of output
- `error` (string) Optional reason of error
diff --git a/web/app/components/develop/template/template_workflow.en.mdx b/web/app/components/develop/template/template_workflow.en.mdx
index 1c86aad508de66..495b051bd056d3 100644
--- a/web/app/components/develop/template/template_workflow.en.mdx
+++ b/web/app/components/develop/template/template_workflow.en.mdx
@@ -69,7 +69,7 @@ Workflow applications offers non-session support and is ideal for translation, a
- `task_id` (string) Task ID, used for request tracking and the below Stop Generate API
- `data` (object) detail of result
- `id` (string) ID of workflow execution
- - `workflow_id` (string) ID of relatied workflow
+ - `workflow_id` (string) ID of related workflow
- `status` (string) status of execution, `running` / `succeeded` / `failed` / `stopped`
- `outputs` (json) Optional content of output
- `error` (string) Optional reason of error
@@ -94,7 +94,7 @@ Workflow applications offers non-session support and is ideal for translation, a
- `event` (string) fixed to `workflow_started`
- `data` (object) detail
- `id` (string) Unique ID of workflow execution
- - `workflow_id` (string) ID of relatied workflow
+ - `workflow_id` (string) ID of related workflow
- `sequence_number` (int) Self-increasing serial number, self-increasing in the App, starting from 1
- `created_at` (timestamp) Creation timestamp, e.g., 1705395332
- `event: node_started` node execution started
@@ -138,7 +138,7 @@ Workflow applications offers non-session support and is ideal for translation, a
- `event` (string) fixed to `workflow_finished`
- `data` (object) detail
- `id` (string) ID of workflow execution
- - `workflow_id` (string) ID of relatied workflow
+ - `workflow_id` (string) ID of related workflow
- `status` (string) status of execution, `running` / `succeeded` / `failed` / `stopped`
- `outputs` (json) Optional content of output
- `error` (string) Optional reason of error
@@ -237,7 +237,7 @@ Workflow applications offers non-session support and is ideal for translation, a
- `workflow_id` (string) Workflow ID, can be obtained from the streaming chunk return
### Response
- `id` (string) ID of workflow execution
- - `workflow_id` (string) ID of relatied workflow
+ - `workflow_id` (string) ID of related workflow
- `status` (string) status of execution, `running` / `succeeded` / `failed` / `stopped`
- `inputs` (json) content of input
- `outputs` (json) content of output
diff --git a/web/app/components/header/account-setting/account-page/index.tsx b/web/app/components/header/account-setting/account-page/index.tsx
index 7520022c7bc43a..ce7f7e7e224e6e 100644
--- a/web/app/components/header/account-setting/account-page/index.tsx
+++ b/web/app/components/header/account-setting/account-page/index.tsx
@@ -250,15 +250,29 @@ export default function AccountPage() {
showCancel={false}
type='warning'
title={t('common.account.delete')}
- content={<>
-
- {t('common.account.deleteTip')}
-
-
- >}
+ content={
+ <>
+
+ {t('common.account.deleteTip')}
+
+
+ {`${t('common.account.delete')}: ${userProfile.email}`}
+ >
+ }
confirmText={t('common.operation.ok') as string}
/>
)}
diff --git a/web/app/components/header/account-setting/model-provider-page/model-icon/index.tsx b/web/app/components/header/account-setting/model-provider-page/model-icon/index.tsx
index 347572c755ae1a..a22ec16c252288 100644
--- a/web/app/components/header/account-setting/model-provider-page/model-icon/index.tsx
+++ b/web/app/components/header/account-setting/model-provider-page/model-icon/index.tsx
@@ -19,7 +19,7 @@ const ModelIcon: FC = ({
}) => {
const language = useLanguage()
- if (provider?.provider === 'openai' && modelName?.startsWith('gpt-4'))
+ if (provider?.provider === 'openai' && (modelName?.startsWith('gpt-4') || modelName?.includes('4o')))
return
if (provider?.icon_small) {
diff --git a/web/app/components/header/account-setting/model-provider-page/model-parameter-modal/parameter-item.tsx b/web/app/components/header/account-setting/model-provider-page/model-parameter-modal/parameter-item.tsx
index 57ea4bdd118fed..eced2a8082bb86 100644
--- a/web/app/components/header/account-setting/model-provider-page/model-parameter-modal/parameter-item.tsx
+++ b/web/app/components/header/account-setting/model-provider-page/model-parameter-modal/parameter-item.tsx
@@ -100,7 +100,7 @@ const ParameterItem: FC = ({
handleInputChange(v === 1)
}
- const handleStringInputChange = (e: React.ChangeEvent) => {
+ const handleStringInputChange = (e: React.ChangeEvent) => {
handleInputChange(e.target.value)
}
@@ -190,6 +190,16 @@ const ParameterItem: FC = ({
)
}
+ if (parameterRule.type === 'text') {
+ return (
+
+ )
+ }
+
if (parameterRule.type === 'string' && !!parameterRule?.options?.length) {
return (
= ({
title: installedAppInfo?.app.name,
prompt_public: false,
copyright: '',
+ icon: installedAppInfo?.app.icon,
+ icon_background: installedAppInfo?.app.icon_background,
},
plan: 'basic',
}
@@ -408,6 +411,8 @@ const TextGeneration: FC = ({
}
}, [siteInfo?.title, canReplaceLogo])
+ useAppFavicon(!isInstalledApp, siteInfo?.icon, siteInfo?.icon_background)
+
const [isShowResSidebar, { setTrue: doShowResSidebar, setFalse: hideResSidebar }] = useBoolean(false)
const showResSidebar = () => {
// fix: useClickAway hideResSidebar will close sidebar
diff --git a/web/app/components/tools/edit-custom-collection-modal/index.tsx b/web/app/components/tools/edit-custom-collection-modal/index.tsx
index 5fcf6fb0248705..e84e15da17fbb0 100644
--- a/web/app/components/tools/edit-custom-collection-modal/index.tsx
+++ b/web/app/components/tools/edit-custom-collection-modal/index.tsx
@@ -327,36 +327,36 @@ const EditCustomCollectionModal: FC = ({
{t('common.operation.save')}
+ {showEmojiPicker && {
+ setEmoji({ content: icon, background: icon_background })
+ setShowEmojiPicker(false)
+ }}
+ onClose={() => {
+ setShowEmojiPicker(false)
+ }}
+ />}
+ {credentialsModalShow && (
+ setCredentialsModalShow(false)}
+ />)
+ }
+ {isShowTestApi && (
+ setIsShowTestApi(false)}
+ />
+ )}
}
isShowMask={true}
clickOutsideNotOpen={true}
/>
- {showEmojiPicker && {
- setEmoji({ content: icon, background: icon_background })
- setShowEmojiPicker(false)
- }}
- onClose={() => {
- setShowEmojiPicker(false)
- }}
- />}
- {credentialsModalShow && (
- setCredentialsModalShow(false)}
- />)
- }
- {isShowTestApi && (
- setIsShowTestApi(false)}
- />
- )}
>
)
diff --git a/web/app/components/workflow/block-icon.tsx b/web/app/components/workflow/block-icon.tsx
index 6bec7044920ae9..a7e89ad6ce8514 100644
--- a/web/app/components/workflow/block-icon.tsx
+++ b/web/app/components/workflow/block-icon.tsx
@@ -3,6 +3,7 @@ import { memo } from 'react'
import { BlockEnum } from './types'
import {
Answer,
+ Assigner,
Code,
End,
Home,
@@ -43,6 +44,7 @@ const getIcon = (type: BlockEnum, className: string) => {
[BlockEnum.TemplateTransform]: ,
[BlockEnum.VariableAssigner]: ,
[BlockEnum.VariableAggregator]: ,
+ [BlockEnum.Assigner]: ,
[BlockEnum.Tool]: ,
[BlockEnum.Iteration]: ,
[BlockEnum.ParameterExtractor]: ,
@@ -62,6 +64,7 @@ const ICON_CONTAINER_BG_COLOR_MAP: Record = {
[BlockEnum.TemplateTransform]: 'bg-[#2E90FA]',
[BlockEnum.VariableAssigner]: 'bg-[#2E90FA]',
[BlockEnum.VariableAggregator]: 'bg-[#2E90FA]',
+ [BlockEnum.Assigner]: 'bg-[#2E90FA]',
[BlockEnum.ParameterExtractor]: 'bg-[#2E90FA]',
}
const BlockIcon: FC = ({
diff --git a/web/app/components/workflow/block-selector/constants.tsx b/web/app/components/workflow/block-selector/constants.tsx
index 517d9356f2ea6d..fbe0a9a8a38054 100644
--- a/web/app/components/workflow/block-selector/constants.tsx
+++ b/web/app/components/workflow/block-selector/constants.tsx
@@ -59,6 +59,11 @@ export const BLOCKS: Block[] = [
type: BlockEnum.VariableAggregator,
title: 'Variable Aggregator',
},
+ {
+ classification: BlockClassificationEnum.Transform,
+ type: BlockEnum.Assigner,
+ title: 'Variable Assigner',
+ },
{
classification: BlockClassificationEnum.Transform,
type: BlockEnum.ParameterExtractor,
diff --git a/web/app/components/workflow/block-selector/index.tsx b/web/app/components/workflow/block-selector/index.tsx
index dd4177245f6438..a62226a15d1866 100644
--- a/web/app/components/workflow/block-selector/index.tsx
+++ b/web/app/components/workflow/block-selector/index.tsx
@@ -5,6 +5,7 @@ import type {
import {
memo,
useCallback,
+ useMemo,
useState,
} from 'react'
import { useTranslation } from 'react-i18next'
@@ -17,6 +18,7 @@ import {
} from '@remixicon/react'
import type { BlockEnum, OnSelectBlock } from '../types'
import Tabs from './tabs'
+import { TabsEnum } from './types'
import {
PortalToFollowElem,
PortalToFollowElemContent,
@@ -66,6 +68,9 @@ const NodeSelector: FC = ({
const handleOpenChange = useCallback((newOpen: boolean) => {
setLocalOpen(newOpen)
+ if (!newOpen)
+ setSearchText('')
+
if (onOpenChange)
onOpenChange(newOpen)
}, [onOpenChange])
@@ -80,6 +85,19 @@ const NodeSelector: FC = ({
onSelect(type, toolDefaultValue)
}, [handleOpenChange, onSelect])
+ const [activeTab, setActiveTab] = useState(noBlocks ? TabsEnum.Tools : TabsEnum.Blocks)
+ const handleActiveTabChange = useCallback((newActiveTab: TabsEnum) => {
+ setActiveTab(newActiveTab)
+ }, [])
+ const searchPlaceholder = useMemo(() => {
+ if (activeTab === TabsEnum.Blocks)
+ return t('workflow.tabs.searchBlock')
+
+ if (activeTab === TabsEnum.Tools)
+ return t('workflow.tabs.searchTool')
+ return ''
+ }, [activeTab, t])
+
return (
= ({
setSearchText(e.target.value)}
autoFocus
/>
@@ -137,6 +155,8 @@ const NodeSelector: FC = ({
void
searchText: string
onSelect: (type: BlockEnum, tool?: ToolDefaultValue) => void
availableBlocksTypes?: BlockEnum[]
noBlocks?: boolean
}
const Tabs: FC = ({
+ activeTab,
+ onActiveTabChange,
searchText,
onSelect,
availableBlocksTypes,
noBlocks,
}) => {
const tabs = useTabs()
- const [activeTab, setActiveTab] = useState(noBlocks ? TabsEnum.Tools : TabsEnum.Blocks)
return (
e.stopPropagation()}>
@@ -41,7 +41,7 @@ const Tabs: FC = ({
? 'text-gray-700 after:absolute after:bottom-0 after:left-0 after:h-0.5 after:w-full after:bg-primary-600'
: 'text-gray-500',
)}
- onClick={() => setActiveTab(tab.key)}
+ onClick={() => onActiveTabChange(tab.key)}
>
{tab.name}
diff --git a/web/app/components/workflow/constants.ts b/web/app/components/workflow/constants.ts
index a77e092963bb3e..070748bab0ec9c 100644
--- a/web/app/components/workflow/constants.ts
+++ b/web/app/components/workflow/constants.ts
@@ -12,6 +12,7 @@ import HttpRequestDefault from './nodes/http/default'
import ParameterExtractorDefault from './nodes/parameter-extractor/default'
import ToolDefault from './nodes/tool/default'
import VariableAssignerDefault from './nodes/variable-assigner/default'
+import AssignerDefault from './nodes/assigner/default'
import EndNodeDefault from './nodes/end/default'
import IterationDefault from './nodes/iteration/default'
@@ -133,6 +134,15 @@ export const NODES_EXTRA_DATA: Record = {
getAvailableNextNodes: VariableAssignerDefault.getAvailableNextNodes,
checkValid: VariableAssignerDefault.checkValid,
},
+ [BlockEnum.Assigner]: {
+ author: 'Dify',
+ about: '',
+ availablePrevNodes: [],
+ availableNextNodes: [],
+ getAvailablePrevNodes: AssignerDefault.getAvailablePrevNodes,
+ getAvailableNextNodes: AssignerDefault.getAvailableNextNodes,
+ checkValid: AssignerDefault.checkValid,
+ },
[BlockEnum.VariableAggregator]: {
author: 'Dify',
about: '',
@@ -268,6 +278,12 @@ export const NODES_INITIAL_DATA = {
output_type: '',
...VariableAssignerDefault.defaultValue,
},
+ [BlockEnum.Assigner]: {
+ type: BlockEnum.Assigner,
+ title: '',
+ desc: '',
+ ...AssignerDefault.defaultValue,
+ },
[BlockEnum.Tool]: {
type: BlockEnum.Tool,
title: '',
diff --git a/web/app/components/workflow/header/chat-variable-button.tsx b/web/app/components/workflow/header/chat-variable-button.tsx
new file mode 100644
index 00000000000000..39745d4fb5dd3c
--- /dev/null
+++ b/web/app/components/workflow/header/chat-variable-button.tsx
@@ -0,0 +1,24 @@
+import { memo } from 'react'
+import Button from '@/app/components/base/button'
+import { BubbleX } from '@/app/components/base/icons/src/vender/line/others'
+import { useStore } from '@/app/components/workflow/store'
+
+const ChatVariableButton = ({ disabled }: { disabled: boolean }) => {
+ const setShowChatVariablePanel = useStore(s => s.setShowChatVariablePanel)
+ const setShowEnvPanel = useStore(s => s.setShowEnvPanel)
+ const setShowDebugAndPreviewPanel = useStore(s => s.setShowDebugAndPreviewPanel)
+
+ const handleClick = () => {
+ setShowChatVariablePanel(true)
+ setShowEnvPanel(false)
+ setShowDebugAndPreviewPanel(false)
+ }
+
+ return (
+
+
+
+ )
+}
+
+export default memo(ChatVariableButton)
diff --git a/web/app/components/workflow/header/env-button.tsx b/web/app/components/workflow/header/env-button.tsx
index f9327397164f74..71598776de1e8b 100644
--- a/web/app/components/workflow/header/env-button.tsx
+++ b/web/app/components/workflow/header/env-button.tsx
@@ -1,21 +1,23 @@
import { memo } from 'react'
+import Button from '@/app/components/base/button'
import { Env } from '@/app/components/base/icons/src/vender/line/others'
import { useStore } from '@/app/components/workflow/store'
-import cn from '@/utils/classnames'
-const EnvButton = () => {
+const EnvButton = ({ disabled }: { disabled: boolean }) => {
+ const setShowChatVariablePanel = useStore(s => s.setShowChatVariablePanel)
const setShowEnvPanel = useStore(s => s.setShowEnvPanel)
const setShowDebugAndPreviewPanel = useStore(s => s.setShowDebugAndPreviewPanel)
const handleClick = () => {
setShowEnvPanel(true)
+ setShowChatVariablePanel(false)
setShowDebugAndPreviewPanel(false)
}
return (
-
+
-
+
)
}
diff --git a/web/app/components/workflow/header/index.tsx b/web/app/components/workflow/header/index.tsx
index 75d5b29a834698..58624d8161dbed 100644
--- a/web/app/components/workflow/header/index.tsx
+++ b/web/app/components/workflow/header/index.tsx
@@ -19,6 +19,7 @@ import {
import type { StartNodeType } from '../nodes/start/types'
import {
useChecklistBeforePublish,
+ useIsChatMode,
useNodesReadOnly,
useNodesSyncDraft,
useWorkflowMode,
@@ -31,6 +32,7 @@ import EditingTitle from './editing-title'
import RunningTitle from './running-title'
import RestoringTitle from './restoring-title'
import ViewHistory from './view-history'
+import ChatVariableButton from './chat-variable-button'
import EnvButton from './env-button'
import Button from '@/app/components/base/button'
import { useStore as useAppStore } from '@/app/components/app/store'
@@ -44,7 +46,8 @@ const Header: FC = () => {
const appDetail = useAppStore(s => s.appDetail)
const appSidebarExpand = useAppStore(s => s.appSidebarExpand)
const appID = appDetail?.id
- const { getNodesReadOnly } = useNodesReadOnly()
+ const isChatMode = useIsChatMode()
+ const { nodesReadOnly, getNodesReadOnly } = useNodesReadOnly()
const publishedAt = useStore(s => s.publishedAt)
const draftUpdatedAt = useStore(s => s.draftUpdatedAt)
const toolPublished = useStore(s => s.toolPublished)
@@ -165,7 +168,8 @@ const Header: FC = () => {
{
normal && (
-
+ {isChatMode &&
}
+
@@ -176,7 +180,7 @@ const Header: FC = () => {
{...{
publishedAt,
draftUpdatedAt,
- disabled: Boolean(getNodesReadOnly()),
+ disabled: nodesReadOnly,
toolPublished,
inputs: variables,
onRefreshData: handleToolConfigureUpdate,
diff --git a/web/app/components/workflow/hooks/use-nodes-sync-draft.ts b/web/app/components/workflow/hooks/use-nodes-sync-draft.ts
index 78e065329508cf..06d0113df68d61 100644
--- a/web/app/components/workflow/hooks/use-nodes-sync-draft.ts
+++ b/web/app/components/workflow/hooks/use-nodes-sync-draft.ts
@@ -31,6 +31,7 @@ export const useNodesSyncDraft = () => {
const [x, y, zoom] = transform
const {
appId,
+ conversationVariables,
environmentVariables,
syncWorkflowDraftHash,
} = workflowStore.getState()
@@ -82,6 +83,7 @@ export const useNodesSyncDraft = () => {
file_upload: features.file,
},
environment_variables: environmentVariables,
+ conversation_variables: conversationVariables,
hash: syncWorkflowDraftHash,
},
}
diff --git a/web/app/components/workflow/hooks/use-workflow-interactions.ts b/web/app/components/workflow/hooks/use-workflow-interactions.ts
index dd54ec7401a8f6..820d5fe2fae603 100644
--- a/web/app/components/workflow/hooks/use-workflow-interactions.ts
+++ b/web/app/components/workflow/hooks/use-workflow-interactions.ts
@@ -68,6 +68,7 @@ export const useWorkflowUpdate = () => {
setIsSyncingWorkflowDraft,
setEnvironmentVariables,
setEnvSecrets,
+ setConversationVariables,
} = workflowStore.getState()
setIsSyncingWorkflowDraft(true)
fetchWorkflowDraft(`/apps/${appId}/workflows/draft`).then((response) => {
@@ -78,6 +79,8 @@ export const useWorkflowUpdate = () => {
return acc
}, {} as Record))
setEnvironmentVariables(response.environment_variables?.map(env => env.value_type === 'secret' ? { ...env, value: '[__HIDDEN__]' } : env) || [])
+ // #TODO chatVar sync#
+ setConversationVariables(response.conversation_variables || [])
}).finally(() => setIsSyncingWorkflowDraft(false))
}, [handleUpdateWorkflowCanvas, workflowStore])
diff --git a/web/app/components/workflow/hooks/use-workflow-start-run.tsx b/web/app/components/workflow/hooks/use-workflow-start-run.tsx
index 19dd94cf510633..b2b1c69975658a 100644
--- a/web/app/components/workflow/hooks/use-workflow-start-run.tsx
+++ b/web/app/components/workflow/hooks/use-workflow-start-run.tsx
@@ -67,9 +67,11 @@ export const useWorkflowStartRun = () => {
setShowDebugAndPreviewPanel,
setHistoryWorkflowData,
setShowEnvPanel,
+ setShowChatVariablePanel,
} = workflowStore.getState()
setShowEnvPanel(false)
+ setShowChatVariablePanel(false)
if (showDebugAndPreviewPanel)
handleCancelDebugAndPreviewPanel()
diff --git a/web/app/components/workflow/hooks/use-workflow-variables.ts b/web/app/components/workflow/hooks/use-workflow-variables.ts
index 081e8e624280f2..feadaf86594a26 100644
--- a/web/app/components/workflow/hooks/use-workflow-variables.ts
+++ b/web/app/components/workflow/hooks/use-workflow-variables.ts
@@ -12,6 +12,7 @@ import type {
export const useWorkflowVariables = () => {
const { t } = useTranslation()
const environmentVariables = useStore(s => s.environmentVariables)
+ const conversationVariables = useStore(s => s.conversationVariables)
const getNodeAvailableVars = useCallback(({
parentNode,
@@ -19,12 +20,14 @@ export const useWorkflowVariables = () => {
isChatMode,
filterVar,
hideEnv,
+ hideChatVar,
}: {
parentNode?: Node | null
beforeNodes: Node[]
isChatMode: boolean
filterVar: (payload: Var, selector: ValueSelector) => boolean
hideEnv?: boolean
+ hideChatVar?: boolean
}): NodeOutPutVar[] => {
return toNodeAvailableVars({
parentNode,
@@ -32,9 +35,10 @@ export const useWorkflowVariables = () => {
beforeNodes,
isChatMode,
environmentVariables: hideEnv ? [] : environmentVariables,
+ conversationVariables: (isChatMode && !hideChatVar) ? conversationVariables : [],
filterVar,
})
- }, [environmentVariables, t])
+ }, [conversationVariables, environmentVariables, t])
const getCurrentVariableType = useCallback(({
parentNode,
@@ -59,8 +63,9 @@ export const useWorkflowVariables = () => {
isChatMode,
isConstant,
environmentVariables,
+ conversationVariables,
})
- }, [environmentVariables])
+ }, [conversationVariables, environmentVariables])
return {
getNodeAvailableVars,
diff --git a/web/app/components/workflow/hooks/use-workflow.ts b/web/app/components/workflow/hooks/use-workflow.ts
index b1c97585099c0f..71f0600d391199 100644
--- a/web/app/components/workflow/hooks/use-workflow.ts
+++ b/web/app/components/workflow/hooks/use-workflow.ts
@@ -478,6 +478,8 @@ export const useWorkflowInit = () => {
return acc
}, {} as Record),
environmentVariables: res.environment_variables?.map(env => env.value_type === 'secret' ? { ...env, value: '[__HIDDEN__]' } : env) || [],
+ // #TODO chatVar sync#
+ conversationVariables: res.conversation_variables || [],
})
setSyncWorkflowDraftHash(res.hash)
setIsLoading(false)
@@ -498,6 +500,7 @@ export const useWorkflowInit = () => {
retriever_resource: { enabled: true },
},
environment_variables: [],
+ conversation_variables: [],
},
}).then((res) => {
workflowStore.getState().setDraftUpdatedAt(res.updated_at)
diff --git a/web/app/components/workflow/index.tsx b/web/app/components/workflow/index.tsx
index 34c7fc4dccd956..c06e1b9524680b 100644
--- a/web/app/components/workflow/index.tsx
+++ b/web/app/components/workflow/index.tsx
@@ -264,7 +264,12 @@ const Workflow: FC = memo(({
const { shortcutsEnabled: workflowHistoryShortcutsEnabled } = useWorkflowHistoryStore()
- useKeyPress(['delete', 'backspace'], handleNodesDelete)
+ useKeyPress(['delete', 'backspace'], (e) => {
+ if (isEventTargetInputArea(e.target as HTMLElement))
+ return
+
+ handleNodesDelete()
+ })
useKeyPress(['delete', 'backspace'], handleEdgeDelete)
useKeyPress(`${getKeyboardKeyCodeBySystem('ctrl')}.c`, (e) => {
if (isEventTargetInputArea(e.target as HTMLElement))
diff --git a/web/app/components/workflow/nodes/_base/components/add-variable-popup-with-position.tsx b/web/app/components/workflow/nodes/_base/components/add-variable-popup-with-position.tsx
index f03a68d7ee5ee8..657b910ee92efb 100644
--- a/web/app/components/workflow/nodes/_base/components/add-variable-popup-with-position.tsx
+++ b/web/app/components/workflow/nodes/_base/components/add-variable-popup-with-position.tsx
@@ -64,6 +64,7 @@ const AddVariablePopupWithPosition = ({
} as any,
],
hideEnv: true,
+ hideChatVar: true,
isChatMode,
filterVar: filterVar(outputType as VarType),
})
diff --git a/web/app/components/workflow/nodes/_base/components/before-run-form/form-item.tsx b/web/app/components/workflow/nodes/_base/components/before-run-form/form-item.tsx
index 2a66f2a59aa8d1..cde437c4c5fca3 100644
--- a/web/app/components/workflow/nodes/_base/components/before-run-form/form-item.tsx
+++ b/web/app/components/workflow/nodes/_base/components/before-run-form/form-item.tsx
@@ -18,6 +18,8 @@ import { useFeatures } from '@/app/components/base/features/hooks'
import { VarBlockIcon } from '@/app/components/workflow/block-icon'
import { Line3 } from '@/app/components/base/icons/src/public/common'
import { Variable02 } from '@/app/components/base/icons/src/vender/solid/development'
+import { BubbleX } from '@/app/components/base/icons/src/vender/line/others'
+import cn from '@/utils/classnames'
type Props = {
payload: InputVar
@@ -56,22 +58,24 @@ const FormItem: FC = ({
}, [value, onChange])
const nodeKey = (() => {
if (typeof payload.label === 'object') {
- const { nodeType, nodeName, variable } = payload.label
+ const { nodeType, nodeName, variable, isChatVar } = payload.label
return (
-
-
-
+ {!isChatVar && (
+
-
- {nodeName}
-
-
-
-
+ )}
-
-
+ {!isChatVar &&
}
+ {isChatVar &&
}
+
{variable}
@@ -86,7 +90,12 @@ const FormItem: FC
= ({
const isIterator = type === InputVarType.iterator
return (
- {!isArrayLikeType &&
{typeof payload.label === 'object' ? nodeKey : payload.label}
}
+ {!isArrayLikeType && (
+
+
{typeof payload.label === 'object' ? nodeKey : payload.label}
+ {!payload.required &&
{t('workflow.panel.optional')} }
+
+ )}
{
type === InputVarType.textInput && (
diff --git a/web/app/components/workflow/nodes/_base/components/editor/code-editor/index.tsx b/web/app/components/workflow/nodes/_base/components/editor/code-editor/index.tsx
index c4348871d227d8..8f77f96bde92f2 100644
--- a/web/app/components/workflow/nodes/_base/components/editor/code-editor/index.tsx
+++ b/web/app/components/workflow/nodes/_base/components/editor/code-editor/index.tsx
@@ -15,7 +15,7 @@ const CODE_EDITOR_LINE_HEIGHT = 18
export type Props = {
value?: string | object
- placeholder?: string
+ placeholder?: JSX.Element | string
onChange?: (value: string) => void
title?: JSX.Element
language: CodeLanguage
@@ -167,7 +167,7 @@ const CodeEditor: FC
= ({
}}
onMount={handleEditorDidMount}
/>
- {!outPutValue && {placeholder}
}
+ {!outPutValue && !isFocus && {placeholder}
}
>
)
diff --git a/web/app/components/workflow/nodes/_base/components/input-support-select-var.tsx b/web/app/components/workflow/nodes/_base/components/input-support-select-var.tsx
index 3f4e7d8c460ccb..2a2b77268e56ca 100644
--- a/web/app/components/workflow/nodes/_base/components/input-support-select-var.tsx
+++ b/web/app/components/workflow/nodes/_base/components/input-support-select-var.tsx
@@ -26,6 +26,7 @@ type Props = {
justVar?: boolean
nodesOutputVars?: NodeOutPutVar[]
availableNodes?: Node[]
+ insertVarTipToLeft?: boolean
}
const Editor: FC = ({
@@ -40,6 +41,7 @@ const Editor: FC = ({
readOnly,
nodesOutputVars,
availableNodes = [],
+ insertVarTipToLeft,
}) => {
const { t } = useTranslation()
@@ -106,12 +108,12 @@ const Editor: FC = ({
{/* to patch Editor not support dynamic change editable status */}
{readOnly &&
}
{isFocus && (
-
+
diff --git a/web/app/components/workflow/nodes/_base/components/option-card.tsx b/web/app/components/workflow/nodes/_base/components/option-card.tsx
index 62fe8937fd43dd..71c2c2958db725 100644
--- a/web/app/components/workflow/nodes/_base/components/option-card.tsx
+++ b/web/app/components/workflow/nodes/_base/components/option-card.tsx
@@ -45,7 +45,7 @@ const OptionCard: FC
= ({
return (
= ({
const value = vars[index].split('.')
const isSystem = isSystemVar(value)
const isEnv = isENV(value)
+ const isChatVar = isConversationVar(value)
const node = (isSystem ? startNode : getNodeInfoById(availableNodes, value[0]))?.data
const varName = `${isSystem ? 'sys.' : ''}${value[value.length - 1]}`
return (
{str}
- {!isEnv && (
+ {!isEnv && !isChatVar && (
= ({
)}
- {!isEnv &&
}
+ {!isEnv && !isChatVar &&
}
{isEnv &&
}
-
{varName}
+ {isChatVar &&
}
+
{varName}
)
diff --git a/web/app/components/workflow/nodes/_base/components/selector.tsx b/web/app/components/workflow/nodes/_base/components/selector.tsx
index 104500f3c5472f..3e401076af2035 100644
--- a/web/app/components/workflow/nodes/_base/components/selector.tsx
+++ b/web/app/components/workflow/nodes/_base/components/selector.tsx
@@ -10,6 +10,7 @@ type Item = {
label: string
}
type Props = {
+ className?: string
trigger?: JSX.Element
DropDownIcon?: any
noLeft?: boolean
@@ -27,6 +28,7 @@ type Props = {
}
const TypeSelector: FC
= ({
+ className,
trigger,
DropDownIcon = ChevronSelectorVertical,
noLeft,
@@ -50,11 +52,12 @@ const TypeSelector: FC = ({
setHide()
}, ref)
return (
-
+
{trigger
? (
{trigger}
@@ -63,13 +66,13 @@ const TypeSelector: FC
= ({
-
{!noValue ? item?.label : placeholder}
+
{!noValue ? item?.label : placeholder}
{!readonly &&
}
)}
{(showOption && !readonly) && (
-
+
{list.map(item => (
node.id === valueSelector[0])
}, [nodes, valueSelector])
const isEnv = isENV(valueSelector)
+ const isChatVar = isConversationVar(valueSelector)
const variableName = isSystemVar(valueSelector) ? valueSelector.slice(0).join('.') : valueSelector.slice(1).join('.')
return (
- {!isEnv && (
+ {!isEnv && !isChatVar && (
<>
{node && (
)}
{isEnv &&
}
+ {isChatVar &&
}
{variableName}
diff --git a/web/app/components/workflow/nodes/_base/components/variable/constant-field.tsx b/web/app/components/workflow/nodes/_base/components/variable/constant-field.tsx
index bd7d1599066834..802e778c2cd152 100644
--- a/web/app/components/workflow/nodes/_base/components/variable/constant-field.tsx
+++ b/web/app/components/workflow/nodes/_base/components/variable/constant-field.tsx
@@ -9,14 +9,14 @@ import type { Var } from '@/app/components/workflow/types'
import { SimpleSelect } from '@/app/components/base/select'
type Props = {
- schema: CredentialFormSchema
+ schema: Partial
readonly: boolean
value: string
onChange: (value: string | number, varKindType: VarKindType, varInfo?: Var) => void
}
const ConstantField: FC = ({
- schema,
+ schema = {} as CredentialFormSchema,
readonly,
value,
onChange,
@@ -47,7 +47,7 @@ const ConstantField: FC = ({
{schema.type === FormTypeEnum.textNumber && (
{
return valueSelector[0] === 'env'
}
+export const isConversationVar = (valueSelector: ValueSelector) => {
+ return valueSelector[0] === 'conversation'
+}
+
const inputVarTypeToVarType = (type: InputVarType): VarType => {
if (type === InputVarType.number)
return VarType.number
@@ -246,13 +250,32 @@ const formatItem = (
}) as Var[]
break
}
+
+ case 'conversation': {
+ res.vars = data.chatVarList.map((chatVar: ConversationVariable) => {
+ return {
+ variable: `conversation.${chatVar.name}`,
+ type: chatVar.value_type,
+ des: chatVar.description,
+ }
+ }) as Var[]
+ break
+ }
}
const selector = [id]
res.vars = res.vars.filter((v) => {
const { children } = v
- if (!children)
- return filterVar(v, selector)
+ if (!children) {
+ return filterVar(v, (() => {
+ const variableArr = v.variable.split('.')
+ const [first, ..._other] = variableArr
+ if (first === 'sys' || first === 'env' || first === 'conversation')
+ return variableArr
+
+ return [...selector, ...variableArr]
+ })())
+ }
const obj = findExceptVarInObject(v, filterVar, selector)
return obj?.children && obj?.children.length > 0
@@ -271,6 +294,7 @@ export const toNodeOutputVars = (
isChatMode: boolean,
filterVar = (_payload: Var, _selector: ValueSelector) => true,
environmentVariables: EnvironmentVariable[] = [],
+ conversationVariables: ConversationVariable[] = [],
): NodeOutPutVar[] => {
// ENV_NODE data format
const ENV_NODE = {
@@ -281,9 +305,19 @@ export const toNodeOutputVars = (
envList: environmentVariables,
},
}
+ // CHAT_VAR_NODE data format
+ const CHAT_VAR_NODE = {
+ id: 'conversation',
+ data: {
+ title: 'CONVERSATION',
+ type: 'conversation',
+ chatVarList: conversationVariables,
+ },
+ }
const res = [
...nodes.filter(node => SUPPORT_OUTPUT_VARS_NODE.includes(node.data.type)),
...(environmentVariables.length > 0 ? [ENV_NODE] : []),
+ ...((isChatMode && conversationVariables.length) > 0 ? [CHAT_VAR_NODE] : []),
].map((node) => {
return {
...formatItem(node, isChatMode, filterVar),
@@ -348,6 +382,7 @@ export const getVarType = ({
isChatMode,
isConstant,
environmentVariables = [],
+ conversationVariables = [],
}:
{
valueSelector: ValueSelector
@@ -357,6 +392,7 @@ export const getVarType = ({
isChatMode: boolean
isConstant?: boolean
environmentVariables?: EnvironmentVariable[]
+ conversationVariables?: ConversationVariable[]
}): VarType => {
if (isConstant)
return VarType.string
@@ -366,6 +402,7 @@ export const getVarType = ({
isChatMode,
undefined,
environmentVariables,
+ conversationVariables,
)
const isIterationInnerVar = parentNode?.data.type === BlockEnum.Iteration
@@ -388,6 +425,7 @@ export const getVarType = ({
}
const isSystem = isSystemVar(valueSelector)
const isEnv = isENV(valueSelector)
+ const isChatVar = isConversationVar(valueSelector)
const startNode = availableNodes.find((node: any) => {
return node.data.type === BlockEnum.Start
})
@@ -400,7 +438,7 @@ export const getVarType = ({
let type: VarType = VarType.string
let curr: any = targetVar.vars
- if (isSystem || isEnv) {
+ if (isSystem || isEnv || isChatVar) {
return curr.find((v: any) => v.variable === (valueSelector as ValueSelector).join('.'))?.type
}
else {
@@ -426,6 +464,7 @@ export const toNodeAvailableVars = ({
beforeNodes,
isChatMode,
environmentVariables,
+ conversationVariables,
filterVar,
}: {
parentNode?: Node | null
@@ -435,6 +474,8 @@ export const toNodeAvailableVars = ({
isChatMode: boolean
// env
environmentVariables?: EnvironmentVariable[]
+ // chat var
+ conversationVariables?: ConversationVariable[]
filterVar: (payload: Var, selector: ValueSelector) => boolean
}): NodeOutPutVar[] => {
const beforeNodesOutputVars = toNodeOutputVars(
@@ -442,6 +483,7 @@ export const toNodeAvailableVars = ({
isChatMode,
filterVar,
environmentVariables,
+ conversationVariables,
)
const isInIteration = parentNode?.data.type === BlockEnum.Iteration
if (isInIteration) {
@@ -453,6 +495,7 @@ export const toNodeAvailableVars = ({
availableNodes: beforeNodes,
isChatMode,
environmentVariables,
+ conversationVariables,
})
const iterationVar = {
nodeId: iterationNode?.id,
diff --git a/web/app/components/workflow/nodes/_base/components/variable/var-reference-picker.tsx b/web/app/components/workflow/nodes/_base/components/variable/var-reference-picker.tsx
index 9a41e60f39a4ef..e2b1f0a31c805c 100644
--- a/web/app/components/workflow/nodes/_base/components/variable/var-reference-picker.tsx
+++ b/web/app/components/workflow/nodes/_base/components/variable/var-reference-picker.tsx
@@ -9,7 +9,7 @@ import {
import produce from 'immer'
import { useStoreApi } from 'reactflow'
import VarReferencePopup from './var-reference-popup'
-import { getNodeInfoById, isENV, isSystemVar } from './utils'
+import { getNodeInfoById, isConversationVar, isENV, isSystemVar } from './utils'
import ConstantField from './constant-field'
import cn from '@/utils/classnames'
import type { Node, NodeOutPutVar, ValueSelector, Var } from '@/app/components/workflow/types'
@@ -17,7 +17,7 @@ import type { CredentialFormSchema } from '@/app/components/header/account-setti
import { BlockEnum } from '@/app/components/workflow/types'
import { VarBlockIcon } from '@/app/components/workflow/block-icon'
import { Line3 } from '@/app/components/base/icons/src/public/common'
-import { Env } from '@/app/components/base/icons/src/vender/line/others'
+import { BubbleX, Env } from '@/app/components/base/icons/src/vender/line/others'
import { Variable02 } from '@/app/components/base/icons/src/vender/solid/development'
import {
PortalToFollowElem,
@@ -32,6 +32,7 @@ import {
import { VarType as VarKindType } from '@/app/components/workflow/nodes/tool/types'
import TypeSelector from '@/app/components/workflow/nodes/_base/components/selector'
import AddButton from '@/app/components/base/button/add-button'
+import Badge from '@/app/components/base/badge'
const TRIGGER_DEFAULT_WIDTH = 227
type Props = {
@@ -49,7 +50,8 @@ type Props = {
availableNodes?: Node[]
availableVars?: NodeOutPutVar[]
isAddBtnTrigger?: boolean
- schema?: CredentialFormSchema
+ schema?: Partial
+ valueTypePlaceHolder?: string
}
const VarReferencePicker: FC = ({
@@ -57,7 +59,7 @@ const VarReferencePicker: FC = ({
readonly,
className,
isShowNodeName,
- value,
+ value = [],
onOpen = () => { },
onChange,
isSupportConstantValue,
@@ -68,6 +70,7 @@ const VarReferencePicker: FC = ({
availableVars,
isAddBtnTrigger,
schema,
+ valueTypePlaceHolder,
}) => {
const { t } = useTranslation()
const store = useStoreApi()
@@ -99,7 +102,6 @@ const VarReferencePicker: FC = ({
const [varKindType, setVarKindType] = useState(defaultVarKindType)
const isConstant = isSupportConstantValue && varKindType === VarKindType.constant
-
const outputVars = useMemo(() => {
if (availableVars)
return availableVars
@@ -215,6 +217,7 @@ const VarReferencePicker: FC = ({
})
const isEnv = isENV(value as ValueSelector)
+ const isChatVar = isConversationVar(value as ValueSelector)
// 8(left/right-padding) + 14(icon) + 4 + 14 + 2 = 42 + 17 buff
const availableWidth = triggerWidth - 56
@@ -227,6 +230,8 @@ const VarReferencePicker: FC = ({
return [maxNodeNameWidth, maxVarNameWidth, maxTypeWidth]
})()
+ const WrapElem = isSupportConstantValue ? 'div' : PortalToFollowElemTrigger
+ const VarPickerWrap = !isSupportConstantValue ? 'div' : PortalToFollowElemTrigger
return (
= ({
onOpenChange={setOpen}
placement={isAddBtnTrigger ? 'bottom-end' : 'bottom-start'}
>
- {
+ {
if (readonly)
return
!isConstant ? setOpen(!open) : setControlFocus(Date.now())
@@ -245,23 +250,28 @@ const VarReferencePicker: FC = ({
{ }}>
)
- : (
+ : (
{isSupportConstantValue
?
{
e.stopPropagation()
setOpen(false)
setControlFocus(Date.now())
- }} className='mr-1 flex items-center space-x-1'>
+ }} className='h-full mr-1 flex items-center space-x-1'>
+ {varKindTypes.find(item => item.value === varKindType)?.label}
+
+
+ }
+ popupClassName='top-8'
readonly={readonly}
- DropDownIcon={RiArrowDownSLine}
value={varKindType}
options={varKindTypes}
onChange={handleVarKindTypeChange}
+ showChecked
/>
-
: (!hasValue &&
@@ -276,38 +286,51 @@ const VarReferencePicker: FC
= ({
/>
)
: (
-
- {hasValue
- ? (
- <>
- {isShowNodeName && !isEnv && (
-
-
-
+
{
+ if (readonly)
+ return
+ !isConstant ? setOpen(!open) : setControlFocus(Date.now())
+ }}
+ className='grow h-full'
+ >
+
+
+ {hasValue
+ ? (
+ <>
+ {isShowNodeName && !isEnv && !isChatVar && (
+
+
+
+
+
{outputVarNode?.title}
+
+
+ )}
+
+ {!hasValue &&
}
+ {isEnv &&
}
+ {isChatVar &&
}
+
{varName}
-
{outputVarNode?.title}
-
-
- )}
-
- {!hasValue &&
}
- {isEnv &&
}
-
{varName}
-
-
{type}
- >
- )
- :
{t('workflow.common.setVarValuePlaceholder')}
}
-
+ {type}
+ >
+ )
+ : {t('workflow.common.setVarValuePlaceholder')}
}
+
+
+
+
)}
{(hasValue && !readonly) && (
= ({
>
)}
+ {!hasValue && valueTypePlaceHolder && (
+
+ )}
)}
-
+
diff --git a/web/app/components/workflow/nodes/_base/components/variable/var-reference-vars.tsx b/web/app/components/workflow/nodes/_base/components/variable/var-reference-vars.tsx
index d6e231c6f22e73..fdd37d051846a3 100644
--- a/web/app/components/workflow/nodes/_base/components/variable/var-reference-vars.tsx
+++ b/web/app/components/workflow/nodes/_base/components/variable/var-reference-vars.tsx
@@ -16,7 +16,7 @@ import {
PortalToFollowElemTrigger,
} from '@/app/components/base/portal-to-follow-elem'
import { XCircle } from '@/app/components/base/icons/src/vender/solid/general'
-import { Env } from '@/app/components/base/icons/src/vender/line/others'
+import { BubbleX, Env } from '@/app/components/base/icons/src/vender/line/others'
import { checkKeys } from '@/utils/var'
type ObjectChildrenProps = {
@@ -51,6 +51,7 @@ const Item: FC = ({
const isObj = itemData.type === VarType.object && itemData.children && itemData.children.length > 0
const isSys = itemData.variable.startsWith('sys.')
const isEnv = itemData.variable.startsWith('env.')
+ const isChatVar = itemData.variable.startsWith('conversation.')
const itemRef = useRef(null)
const [isItemHovering, setIsItemHovering] = useState(false)
const _ = useHover(itemRef, {
@@ -79,7 +80,7 @@ const Item: FC = ({
}, [isHovering])
const handleChosen = (e: React.MouseEvent) => {
e.stopPropagation()
- if (isSys || isEnv) { // system variable or environment variable
+ if (isSys || isEnv || isChatVar) { // system variable | environment variable | conversation variable
onChange([...objPath, ...itemData.variable.split('.')], itemData)
}
else {
@@ -100,13 +101,21 @@ const Item: FC = ({
isHovering && (isObj ? 'bg-primary-50' : 'bg-gray-50'),
'relative w-full flex items-center h-6 pl-3 rounded-md cursor-pointer')
}
- // style={{ width: itemWidth || 252 }}
onClick={handleChosen}
>
- {!isEnv &&
}
+ {!isEnv && !isChatVar &&
}
{isEnv &&
}
-
{!isEnv ? itemData.variable : itemData.variable.replace('env.', '')}
+ {isChatVar &&
}
+ {!isEnv && !isChatVar && (
+
{itemData.variable}
+ )}
+ {isEnv && (
+
{itemData.variable.replace('env.', '')}
+ )}
+ {isChatVar && (
+
{itemData.variable.replace('conversation.', '')}
+ )}
{itemData.type}
{isObj && (
@@ -211,7 +220,7 @@ const VarReferenceVars: FC = ({
const [searchText, setSearchText] = useState('')
const filteredVars = vars.filter((v) => {
- const children = v.vars.filter(v => checkKeys([v.variable], false).isValid || v.variable.startsWith('sys.') || v.variable.startsWith('env.'))
+ const children = v.vars.filter(v => checkKeys([v.variable], false).isValid || v.variable.startsWith('sys.') || v.variable.startsWith('env.') || v.variable.startsWith('conversation.'))
return children.length > 0
}).filter((node) => {
if (!searchText)
@@ -222,7 +231,7 @@ const VarReferenceVars: FC = ({
})
return children.length > 0
}).map((node) => {
- let vars = node.vars.filter(v => checkKeys([v.variable], false).isValid || v.variable.startsWith('sys.') || v.variable.startsWith('env.'))
+ let vars = node.vars.filter(v => checkKeys([v.variable], false).isValid || v.variable.startsWith('sys.') || v.variable.startsWith('env.') || v.variable.startsWith('conversation.'))
if (searchText) {
const searchTextLower = searchText.toLowerCase()
if (!node.title.toLowerCase().includes(searchTextLower))
diff --git a/web/app/components/workflow/nodes/_base/hooks/use-available-var-list.ts b/web/app/components/workflow/nodes/_base/hooks/use-available-var-list.ts
index ef3d6659102b42..b81feab8053700 100644
--- a/web/app/components/workflow/nodes/_base/hooks/use-available-var-list.ts
+++ b/web/app/components/workflow/nodes/_base/hooks/use-available-var-list.ts
@@ -7,12 +7,16 @@ import {
import type { ValueSelector, Var } from '@/app/components/workflow/types'
type Params = {
onlyLeafNodeVar?: boolean
+ hideEnv?: boolean
+ hideChatVar?: boolean
filterVar: (payload: Var, selector: ValueSelector) => boolean
}
const useAvailableVarList = (nodeId: string, {
onlyLeafNodeVar,
filterVar,
+ hideEnv,
+ hideChatVar,
}: Params = {
onlyLeafNodeVar: false,
filterVar: () => true,
@@ -32,6 +36,8 @@ const useAvailableVarList = (nodeId: string, {
beforeNodes: availableNodes,
isChatMode,
filterVar,
+ hideEnv,
+ hideChatVar,
})
return {
diff --git a/web/app/components/workflow/nodes/_base/hooks/use-node-help-link.ts b/web/app/components/workflow/nodes/_base/hooks/use-node-help-link.ts
index 2bfcb4400690bf..861018caa9fd5f 100644
--- a/web/app/components/workflow/nodes/_base/hooks/use-node-help-link.ts
+++ b/web/app/components/workflow/nodes/_base/hooks/use-node-help-link.ts
@@ -24,6 +24,7 @@ export const useNodeHelpLink = (nodeType: BlockEnum) => {
[BlockEnum.TemplateTransform]: 'template',
[BlockEnum.VariableAssigner]: 'variable_assigner',
[BlockEnum.VariableAggregator]: 'variable_assigner',
+ [BlockEnum.Assigner]: 'variable_assignment',
[BlockEnum.Iteration]: 'iteration',
[BlockEnum.ParameterExtractor]: 'parameter_extractor',
[BlockEnum.HttpRequest]: 'http_request',
@@ -43,6 +44,7 @@ export const useNodeHelpLink = (nodeType: BlockEnum) => {
[BlockEnum.TemplateTransform]: 'template',
[BlockEnum.VariableAssigner]: 'variable-assigner',
[BlockEnum.VariableAggregator]: 'variable-assigner',
+ [BlockEnum.Assigner]: 'variable-assignment',
[BlockEnum.Iteration]: 'iteration',
[BlockEnum.ParameterExtractor]: 'parameter-extractor',
[BlockEnum.HttpRequest]: 'http-request',
diff --git a/web/app/components/workflow/nodes/_base/hooks/use-one-step-run.ts b/web/app/components/workflow/nodes/_base/hooks/use-one-step-run.ts
index d14fc939dafd16..0a6a7a9c1bbdc8 100644
--- a/web/app/components/workflow/nodes/_base/hooks/use-one-step-run.ts
+++ b/web/app/components/workflow/nodes/_base/hooks/use-one-step-run.ts
@@ -7,12 +7,12 @@ import {
useNodeDataUpdate,
useWorkflow,
} from '@/app/components/workflow/hooks'
-import { getNodeInfoById, isENV, isSystemVar, toNodeOutputVars } from '@/app/components/workflow/nodes/_base/components/variable/utils'
+import { getNodeInfoById, isConversationVar, isENV, isSystemVar, toNodeOutputVars } from '@/app/components/workflow/nodes/_base/components/variable/utils'
import type { CommonNodeType, InputVar, ValueSelector, Var, Variable } from '@/app/components/workflow/types'
import { BlockEnum, InputVarType, NodeRunningStatus, VarType } from '@/app/components/workflow/types'
import { useStore as useAppStore } from '@/app/components/app/store'
-import { useWorkflowStore } from '@/app/components/workflow/store'
+import { useStore, useWorkflowStore } from '@/app/components/workflow/store'
import { getIterationSingleNodeRunUrl, singleNodeRun } from '@/service/workflow'
import Toast from '@/app/components/base/toast'
import LLMDefault from '@/app/components/workflow/nodes/llm/default'
@@ -95,12 +95,13 @@ const useOneStepRun = ({
}: Params) => {
const { t } = useTranslation()
const { getBeforeNodesInSameBranch, getBeforeNodesInSameBranchIncludeParent } = useWorkflow() as any
+ const conversationVariables = useStore(s => s.conversationVariables)
const isChatMode = useIsChatMode()
const isIteration = data.type === BlockEnum.Iteration
const availableNodes = getBeforeNodesInSameBranch(id)
const availableNodesIncludeParent = getBeforeNodesInSameBranchIncludeParent(id)
- const allOutputVars = toNodeOutputVars(availableNodes, isChatMode)
+ const allOutputVars = toNodeOutputVars(availableNodes, isChatMode, undefined, undefined, conversationVariables)
const getVar = (valueSelector: ValueSelector): Var | undefined => {
let res: Var | undefined
const isSystem = valueSelector[0] === 'sys'
@@ -116,7 +117,8 @@ const useOneStepRun = ({
valueSelector.slice(1).forEach((key, i) => {
const isLast = i === valueSelector.length - 2
- curr = curr?.find((v: any) => v.variable === key)
+ // conversation variable is start with 'conversation.'
+ curr = curr?.find((v: any) => v.variable.replace('conversation.', '') === key)
if (isLast) {
res = curr
}
@@ -369,6 +371,7 @@ const useOneStepRun = ({
nodeType: varInfo?.type,
nodeName: varInfo?.title || availableNodesIncludeParent[0]?.data.title, // default start node title
variable: isSystemVar(item) ? item.join('.') : item[item.length - 1],
+ isChatVar: isConversationVar(item),
},
variable: `#${item.join('.')}#`,
value_selector: item,
diff --git a/web/app/components/workflow/nodes/answer/panel.tsx b/web/app/components/workflow/nodes/answer/panel.tsx
index feb07c36c9aa62..daa5be4e666d17 100644
--- a/web/app/components/workflow/nodes/answer/panel.tsx
+++ b/web/app/components/workflow/nodes/answer/panel.tsx
@@ -23,6 +23,8 @@ const Panel: FC> = ({
const { availableVars, availableNodesWithParent } = useAvailableVarList(id, {
onlyLeafNodeVar: false,
+ hideChatVar: true,
+ hideEnv: true,
filterVar,
})
diff --git a/web/app/components/workflow/nodes/assigner/default.ts b/web/app/components/workflow/nodes/assigner/default.ts
new file mode 100644
index 00000000000000..6a8245b6d5d4e9
--- /dev/null
+++ b/web/app/components/workflow/nodes/assigner/default.ts
@@ -0,0 +1,46 @@
+import { BlockEnum } from '../../types'
+import type { NodeDefault } from '../../types'
+import { type AssignerNodeType, WriteMode } from './types'
+import { ALL_CHAT_AVAILABLE_BLOCKS, ALL_COMPLETION_AVAILABLE_BLOCKS } from '@/app/components/workflow/constants'
+const i18nPrefix = 'workflow.errorMsg'
+
+const nodeDefault: NodeDefault = {
+ defaultValue: {
+ assigned_variable_selector: [],
+ write_mode: WriteMode.Overwrite,
+ input_variable_selector: [],
+ },
+ getAvailablePrevNodes(isChatMode: boolean) {
+ const nodes = isChatMode
+ ? ALL_CHAT_AVAILABLE_BLOCKS
+ : ALL_COMPLETION_AVAILABLE_BLOCKS.filter(type => type !== BlockEnum.End)
+ return nodes
+ },
+ getAvailableNextNodes(isChatMode: boolean) {
+ const nodes = isChatMode ? ALL_CHAT_AVAILABLE_BLOCKS : ALL_COMPLETION_AVAILABLE_BLOCKS
+ return nodes
+ },
+ checkValid(payload: AssignerNodeType, t: any) {
+ let errorMessages = ''
+ const {
+ assigned_variable_selector: assignedVarSelector,
+ write_mode: writeMode,
+ input_variable_selector: toAssignerVarSelector,
+ } = payload
+
+ if (!errorMessages && !assignedVarSelector?.length)
+ errorMessages = t(`${i18nPrefix}.fieldRequired`, { field: t('workflow.nodes.assigner.assignedVariable') })
+
+ if (!errorMessages && writeMode !== WriteMode.Clear) {
+ if (!toAssignerVarSelector?.length)
+ errorMessages = t(`${i18nPrefix}.fieldRequired`, { field: t('workflow.nodes.assigner.variable') })
+ }
+
+ return {
+ isValid: !errorMessages,
+ errorMessage: errorMessages,
+ }
+ },
+}
+
+export default nodeDefault
diff --git a/web/app/components/workflow/nodes/assigner/node.tsx b/web/app/components/workflow/nodes/assigner/node.tsx
new file mode 100644
index 00000000000000..72745a488a3fa4
--- /dev/null
+++ b/web/app/components/workflow/nodes/assigner/node.tsx
@@ -0,0 +1,47 @@
+import type { FC } from 'react'
+import React from 'react'
+import { useNodes } from 'reactflow'
+import { useTranslation } from 'react-i18next'
+import NodeVariableItem from '../variable-assigner/components/node-variable-item'
+import { type AssignerNodeType } from './types'
+import { isConversationVar, isENV, isSystemVar } from '@/app/components/workflow/nodes/_base/components/variable/utils'
+import { BlockEnum, type Node, type NodeProps } from '@/app/components/workflow/types'
+
+const i18nPrefix = 'workflow.nodes.assigner'
+
+const NodeComponent: FC> = ({
+ data,
+}) => {
+ const { t } = useTranslation()
+
+ const nodes: Node[] = useNodes()
+ const { assigned_variable_selector: variable, write_mode: writeMode } = data
+
+ if (!variable || variable.length === 0)
+ return null
+
+ const isSystem = isSystemVar(variable)
+ const isEnv = isENV(variable)
+ const isChatVar = isConversationVar(variable)
+
+ const node = isSystem ? nodes.find(node => node.data.type === BlockEnum.Start) : nodes.find(node => node.id === variable[0])
+ const varName = isSystem ? `sys.${variable[variable.length - 1]}` : variable.slice(1).join('.')
+ return (
+
+
{t(`${i18nPrefix}.assignedVariable`)}
+
+
+
{t(`${i18nPrefix}.writeMode`)}
+
{t(`${i18nPrefix}.${writeMode}`)}
+
+
+ )
+}
+
+export default React.memo(NodeComponent)
diff --git a/web/app/components/workflow/nodes/assigner/panel.tsx b/web/app/components/workflow/nodes/assigner/panel.tsx
new file mode 100644
index 00000000000000..6bba0d2a316df2
--- /dev/null
+++ b/web/app/components/workflow/nodes/assigner/panel.tsx
@@ -0,0 +1,87 @@
+import type { FC } from 'react'
+import React from 'react'
+import { useTranslation } from 'react-i18next'
+
+import VarReferencePicker from '../_base/components/variable/var-reference-picker'
+import OptionCard from '../_base/components/option-card'
+import useConfig from './use-config'
+import { WriteMode } from './types'
+import type { AssignerNodeType } from './types'
+import Field from '@/app/components/workflow/nodes/_base/components/field'
+import { type NodePanelProps } from '@/app/components/workflow/types'
+import cn from '@/utils/classnames'
+
+const i18nPrefix = 'workflow.nodes.assigner'
+
+const Panel: FC> = ({
+ id,
+ data,
+}) => {
+ const { t } = useTranslation()
+
+ const {
+ readOnly,
+ inputs,
+ handleAssignedVarChanges,
+ isSupportAppend,
+ writeModeTypes,
+ handleWriteModeChange,
+ filterAssignedVar,
+ filterToAssignedVar,
+ handleToAssignedVarChange,
+ toAssignedVarType,
+ } = useConfig(id, data)
+
+ return (
+
+
+
+
+
+
+
+ {writeModeTypes.map(type => (
+
+ ))}
+
+
+ {inputs.write_mode !== WriteMode.Clear && (
+
+
+
+ )}
+
+
+
+ )
+}
+
+export default React.memo(Panel)
diff --git a/web/app/components/workflow/nodes/assigner/types.ts b/web/app/components/workflow/nodes/assigner/types.ts
new file mode 100644
index 00000000000000..d152249d192818
--- /dev/null
+++ b/web/app/components/workflow/nodes/assigner/types.ts
@@ -0,0 +1,13 @@
+import type { CommonNodeType, ValueSelector } from '@/app/components/workflow/types'
+
+export enum WriteMode {
+ Overwrite = 'over-write',
+ Append = 'append',
+ Clear = 'clear',
+}
+
+export type AssignerNodeType = CommonNodeType & {
+ assigned_variable_selector: ValueSelector
+ write_mode: WriteMode
+ input_variable_selector: ValueSelector
+}
diff --git a/web/app/components/workflow/nodes/assigner/use-config.ts b/web/app/components/workflow/nodes/assigner/use-config.ts
new file mode 100644
index 00000000000000..76cf737540e8bc
--- /dev/null
+++ b/web/app/components/workflow/nodes/assigner/use-config.ts
@@ -0,0 +1,144 @@
+import { useCallback, useMemo } from 'react'
+import produce from 'immer'
+import { useStoreApi } from 'reactflow'
+import { isEqual } from 'lodash-es'
+import { VarType } from '../../types'
+import type { ValueSelector, Var } from '../../types'
+import { type AssignerNodeType, WriteMode } from './types'
+import useNodeCrud from '@/app/components/workflow/nodes/_base/hooks/use-node-crud'
+import {
+ useIsChatMode,
+ useNodesReadOnly,
+ useWorkflow,
+ useWorkflowVariables,
+} from '@/app/components/workflow/hooks'
+
+const useConfig = (id: string, payload: AssignerNodeType) => {
+ const { nodesReadOnly: readOnly } = useNodesReadOnly()
+ const isChatMode = useIsChatMode()
+
+ const store = useStoreApi()
+ const { getBeforeNodesInSameBranch } = useWorkflow()
+
+ const {
+ getNodes,
+ } = store.getState()
+ const currentNode = getNodes().find(n => n.id === id)
+ const isInIteration = payload.isInIteration
+ const iterationNode = isInIteration ? getNodes().find(n => n.id === currentNode!.parentId) : null
+ const availableNodes = useMemo(() => {
+ return getBeforeNodesInSameBranch(id)
+ }, [getBeforeNodesInSameBranch, id])
+ const { inputs, setInputs } = useNodeCrud(id, payload)
+
+ const { getCurrentVariableType } = useWorkflowVariables()
+ const assignedVarType = getCurrentVariableType({
+ parentNode: iterationNode,
+ valueSelector: inputs.assigned_variable_selector || [],
+ availableNodes,
+ isChatMode,
+ isConstant: false,
+ })
+
+ const isSupportAppend = useCallback((varType: VarType) => {
+ return [VarType.arrayString, VarType.arrayNumber, VarType.arrayObject].includes(varType)
+ }, [])
+
+ const isCurrSupportAppend = useMemo(() => isSupportAppend(assignedVarType), [assignedVarType, isSupportAppend])
+
+ const handleAssignedVarChanges = useCallback((variable: ValueSelector | string) => {
+ const newInputs = produce(inputs, (draft) => {
+ draft.assigned_variable_selector = variable as ValueSelector
+ draft.input_variable_selector = []
+
+ const newVarType = getCurrentVariableType({
+ parentNode: iterationNode,
+ valueSelector: draft.assigned_variable_selector || [],
+ availableNodes,
+ isChatMode,
+ isConstant: false,
+ })
+
+ if (inputs.write_mode === WriteMode.Append && !isSupportAppend(newVarType))
+ draft.write_mode = WriteMode.Overwrite
+ })
+ setInputs(newInputs)
+ }, [inputs, setInputs, getCurrentVariableType, iterationNode, availableNodes, isChatMode, isSupportAppend])
+
+ const writeModeTypes = [WriteMode.Overwrite, WriteMode.Append, WriteMode.Clear]
+
+ const handleWriteModeChange = useCallback((writeMode: WriteMode) => {
+ return () => {
+ const newInputs = produce(inputs, (draft) => {
+ draft.write_mode = writeMode
+ if (inputs.write_mode === WriteMode.Clear)
+ draft.input_variable_selector = []
+ })
+ setInputs(newInputs)
+ }
+ }, [inputs, setInputs])
+
+ const toAssignedVarType = useMemo(() => {
+ const { write_mode } = inputs
+ if (write_mode === WriteMode.Overwrite)
+ return assignedVarType
+ if (write_mode === WriteMode.Append) {
+ if (assignedVarType === VarType.arrayString)
+ return VarType.string
+ if (assignedVarType === VarType.arrayNumber)
+ return VarType.number
+ if (assignedVarType === VarType.arrayObject)
+ return VarType.object
+ }
+ return VarType.string
+ }, [assignedVarType, inputs])
+
+ const filterAssignedVar = useCallback((varPayload: Var, selector: ValueSelector) => {
+ return selector.join('.').startsWith('conversation')
+ }, [])
+
+ const filterToAssignedVar = useCallback((varPayload: Var, selector: ValueSelector) => {
+ if (isEqual(selector, inputs.assigned_variable_selector))
+ return false
+
+ if (inputs.write_mode === WriteMode.Overwrite) {
+ return varPayload.type === assignedVarType
+ }
+ else if (inputs.write_mode === WriteMode.Append) {
+ switch (assignedVarType) {
+ case VarType.arrayString:
+ return varPayload.type === VarType.string
+ case VarType.arrayNumber:
+ return varPayload.type === VarType.number
+ case VarType.arrayObject:
+ return varPayload.type === VarType.object
+ default:
+ return false
+ }
+ }
+ return true
+ }, [inputs.assigned_variable_selector, inputs.write_mode, assignedVarType])
+
+ const handleToAssignedVarChange = useCallback((value: ValueSelector | string) => {
+ const newInputs = produce(inputs, (draft) => {
+ draft.input_variable_selector = value as ValueSelector
+ })
+ setInputs(newInputs)
+ }, [inputs, setInputs])
+
+ return {
+ readOnly,
+ inputs,
+ handleAssignedVarChanges,
+ assignedVarType,
+ isSupportAppend: isCurrSupportAppend,
+ writeModeTypes,
+ handleWriteModeChange,
+ filterAssignedVar,
+ filterToAssignedVar,
+ handleToAssignedVarChange,
+ toAssignedVarType,
+ }
+}
+
+export default useConfig
diff --git a/web/app/components/workflow/nodes/assigner/utils.ts b/web/app/components/workflow/nodes/assigner/utils.ts
new file mode 100644
index 00000000000000..72678e45fb21bf
--- /dev/null
+++ b/web/app/components/workflow/nodes/assigner/utils.ts
@@ -0,0 +1,5 @@
+import type { AssignerNodeType } from './types'
+
+export const checkNodeValid = (payload: AssignerNodeType) => {
+ return true
+}
diff --git a/web/app/components/workflow/nodes/constants.ts b/web/app/components/workflow/nodes/constants.ts
index a97aa086edac9f..d4610bd8034c3d 100644
--- a/web/app/components/workflow/nodes/constants.ts
+++ b/web/app/components/workflow/nodes/constants.ts
@@ -24,6 +24,8 @@ import ToolNode from './tool/node'
import ToolPanel from './tool/panel'
import VariableAssignerNode from './variable-assigner/node'
import VariableAssignerPanel from './variable-assigner/panel'
+import AssignerNode from './assigner/node'
+import AssignerPanel from './assigner/panel'
import ParameterExtractorNode from './parameter-extractor/node'
import ParameterExtractorPanel from './parameter-extractor/panel'
import IterationNode from './iteration/node'
@@ -42,6 +44,7 @@ export const NodeComponentMap: Record> = {
[BlockEnum.HttpRequest]: HttpNode,
[BlockEnum.Tool]: ToolNode,
[BlockEnum.VariableAssigner]: VariableAssignerNode,
+ [BlockEnum.Assigner]: AssignerNode,
[BlockEnum.VariableAggregator]: VariableAssignerNode,
[BlockEnum.ParameterExtractor]: ParameterExtractorNode,
[BlockEnum.Iteration]: IterationNode,
@@ -61,6 +64,7 @@ export const PanelComponentMap: Record> = {
[BlockEnum.Tool]: ToolPanel,
[BlockEnum.VariableAssigner]: VariableAssignerPanel,
[BlockEnum.VariableAggregator]: VariableAssignerPanel,
+ [BlockEnum.Assigner]: AssignerPanel,
[BlockEnum.ParameterExtractor]: ParameterExtractorPanel,
[BlockEnum.Iteration]: IterationPanel,
}
diff --git a/web/app/components/workflow/nodes/end/node.tsx b/web/app/components/workflow/nodes/end/node.tsx
index cfcb2c12912be6..d5d2eaefd02910 100644
--- a/web/app/components/workflow/nodes/end/node.tsx
+++ b/web/app/components/workflow/nodes/end/node.tsx
@@ -3,7 +3,7 @@ import React from 'react'
import cn from 'classnames'
import type { EndNodeType } from './types'
import type { NodeProps, Variable } from '@/app/components/workflow/types'
-import { isENV, isSystemVar } from '@/app/components/workflow/nodes/_base/components/variable/utils'
+import { isConversationVar, isENV, isSystemVar } from '@/app/components/workflow/nodes/_base/components/variable/utils'
import {
useIsChatMode,
useWorkflow,
@@ -12,7 +12,7 @@ import {
import { VarBlockIcon } from '@/app/components/workflow/block-icon'
import { Line3 } from '@/app/components/base/icons/src/public/common'
import { Variable02 } from '@/app/components/base/icons/src/vender/solid/development'
-import { Env } from '@/app/components/base/icons/src/vender/line/others'
+import { BubbleX, Env } from '@/app/components/base/icons/src/vender/line/others'
import { BlockEnum } from '@/app/components/workflow/types'
const Node: FC> = ({
@@ -44,6 +44,7 @@ const Node: FC> = ({
const node = getNode(value_selector[0])
const isSystem = isSystemVar(value_selector)
const isEnv = isENV(value_selector)
+ const isChatVar = isConversationVar(value_selector)
const varName = isSystem ? `sys.${value_selector[value_selector.length - 1]}` : value_selector[value_selector.length - 1]
const varType = getCurrentVariableType({
valueSelector: value_selector,
@@ -53,7 +54,7 @@ const Node: FC> = ({
return (
- {!isEnv && (
+ {!isEnv && !isChatVar && (
<>
> = ({
>
)}
- {!isEnv &&
}
+ {!isEnv && !isChatVar &&
}
{isEnv &&
}
-
{varName}
+ {isChatVar &&
}
+
+
{varName}
diff --git a/web/app/components/workflow/nodes/http/components/key-value/key-value-edit/index.tsx b/web/app/components/workflow/nodes/http/components/key-value/key-value-edit/index.tsx
index c95e020bbb4dbd..559c4974a678e6 100644
--- a/web/app/components/workflow/nodes/http/components/key-value/key-value-edit/index.tsx
+++ b/web/app/components/workflow/nodes/http/components/key-value/key-value-edit/index.tsx
@@ -17,6 +17,8 @@ type Props = {
onChange: (newList: KeyValue[]) => void
onAdd: () => void
// onSwitchToBulkEdit: () => void
+ keyNotSupportVar?: boolean
+ insertVarTipToLeft?: boolean
}
const KeyValueList: FC
= ({
@@ -26,6 +28,8 @@ const KeyValueList: FC = ({
onChange,
onAdd,
// onSwitchToBulkEdit,
+ keyNotSupportVar,
+ insertVarTipToLeft,
}) => {
const { t } = useTranslation()
@@ -47,6 +51,9 @@ const KeyValueList: FC = ({
}
}, [list, onChange])
+ if (!Array.isArray(list))
+ return null
+
return (
@@ -79,6 +86,8 @@ const KeyValueList: FC
= ({
onAdd={onAdd}
readonly={readonly}
canRemove={list.length > 1}
+ keyNotSupportVar={keyNotSupportVar}
+ insertVarTipToLeft={insertVarTipToLeft}
/>
))
}
diff --git a/web/app/components/workflow/nodes/http/components/key-value/key-value-edit/input-item.tsx b/web/app/components/workflow/nodes/http/components/key-value/key-value-edit/input-item.tsx
index 16b1674d5450dd..3adcd132c0391e 100644
--- a/web/app/components/workflow/nodes/http/components/key-value/key-value-edit/input-item.tsx
+++ b/web/app/components/workflow/nodes/http/components/key-value/key-value-edit/input-item.tsx
@@ -18,6 +18,7 @@ type Props = {
onRemove?: () => void
placeholder?: string
readOnly?: boolean
+ insertVarTipToLeft?: boolean
}
const InputItem: FC = ({
@@ -30,6 +31,7 @@ const InputItem: FC = ({
onRemove,
placeholder,
readOnly,
+ insertVarTipToLeft,
}) => {
const { t } = useTranslation()
@@ -64,6 +66,7 @@ const InputItem: FC = ({
placeholder={t('workflow.nodes.http.insertVarPlaceholder')!}
placeholderClassName='!leading-[21px]'
promptMinHeightClassName='h-full'
+ insertVarTipToLeft={insertVarTipToLeft}
/>
)
: = ({
placeholder={t('workflow.nodes.http.insertVarPlaceholder')!}
placeholderClassName='!leading-[21px]'
promptMinHeightClassName='h-full'
+ insertVarTipToLeft={insertVarTipToLeft}
/>
)}
diff --git a/web/app/components/workflow/nodes/http/components/key-value/key-value-edit/item.tsx b/web/app/components/workflow/nodes/http/components/key-value/key-value-edit/item.tsx
index 7839b947301883..93c2696b983957 100644
--- a/web/app/components/workflow/nodes/http/components/key-value/key-value-edit/item.tsx
+++ b/web/app/components/workflow/nodes/http/components/key-value/key-value-edit/item.tsx
@@ -6,6 +6,7 @@ import produce from 'immer'
import type { KeyValue } from '../../../types'
import InputItem from './input-item'
import cn from '@/utils/classnames'
+import Input from '@/app/components/base/input'
const i18nPrefix = 'workflow.nodes.http'
@@ -20,6 +21,8 @@ type Props = {
onRemove: () => void
isLastItem: boolean
onAdd: () => void
+ keyNotSupportVar?: boolean
+ insertVarTipToLeft?: boolean
}
const KeyValueItem: FC
= ({
@@ -33,6 +36,8 @@ const KeyValueItem: FC = ({
onRemove,
isLastItem,
onAdd,
+ keyNotSupportVar,
+ insertVarTipToLeft,
}) => {
const { t } = useTranslation()
@@ -51,15 +56,26 @@ const KeyValueItem: FC = ({
// group class name is for hover row show remove button
diff --git a/web/app/components/workflow/nodes/if-else/components/condition-value.tsx b/web/app/components/workflow/nodes/if-else/components/condition-value.tsx
index eea3c583e5af3f..b7073350ab3ad5 100644
--- a/web/app/components/workflow/nodes/if-else/components/condition-value.tsx
+++ b/web/app/components/workflow/nodes/if-else/components/condition-value.tsx
@@ -9,9 +9,9 @@ import {
isComparisonOperatorNeedTranslate,
} from '../utils'
import { Variable02 } from '@/app/components/base/icons/src/vender/solid/development'
-import { Env } from '@/app/components/base/icons/src/vender/line/others'
+import { BubbleX, Env } from '@/app/components/base/icons/src/vender/line/others'
import cn from '@/utils/classnames'
-import { isENV, isSystemVar } from '@/app/components/workflow/nodes/_base/components/variable/utils'
+import { isConversationVar, isENV, isSystemVar } from '@/app/components/workflow/nodes/_base/components/variable/utils'
type ConditionValueProps = {
variableSelector: string[]
@@ -27,7 +27,8 @@ const ConditionValue = ({
const variableName = isSystemVar(variableSelector) ? variableSelector.slice(0).join('.') : variableSelector.slice(1).join('.')
const operatorName = isComparisonOperatorNeedTranslate(operator) ? t(`workflow.nodes.ifElse.comparisonOperator.${operator}`) : operator
const notHasValue = comparisonOperatorNotRequireValue(operator)
-
+ const isEnvVar = isENV(variableSelector)
+ const isChatVar = isConversationVar(variableSelector)
const formatValue = useMemo(() => {
if (notHasValue)
return ''
@@ -43,8 +44,10 @@ const ConditionValue = ({
return (
- {!isENV(variableSelector) &&
}
- {isENV(variableSelector) &&
}
+ {!isEnvVar && !isChatVar &&
}
+ {isEnvVar &&
}
+ {isChatVar &&
}
+
{
+ const updateNodeInternals = useUpdateNodeInternals()
const { nodesReadOnly: readOnly } = useNodesReadOnly()
const { handleEdgeDeleteByDeleteBranch } = useEdgesInteractions()
const { inputs, setInputs } = useNodeCrud
(id, payload)
@@ -108,6 +110,7 @@ const useConfig = (id: string, payload: IfElseNodeType) => {
])
})
setInputs(newInputs)
+ updateNodeInternals(id)
}, [inputs, setInputs])
const handleAddCondition = useCallback((caseId, valueSelector, varItem) => {
diff --git a/web/app/components/workflow/nodes/knowledge-retrieval/default.ts b/web/app/components/workflow/nodes/knowledge-retrieval/default.ts
index f66cab6ca937c7..03591dd527aaa1 100644
--- a/web/app/components/workflow/nodes/knowledge-retrieval/default.ts
+++ b/web/app/components/workflow/nodes/knowledge-retrieval/default.ts
@@ -36,7 +36,7 @@ const nodeDefault: NodeDefault = {
if (!errorMessages && (!payload.dataset_ids || payload.dataset_ids.length === 0))
errorMessages = t(`${i18nPrefix}.errorMsg.fieldRequired`, { field: t(`${i18nPrefix}.nodes.knowledgeRetrieval.knowledge`) })
- if (!errorMessages && payload.retrieval_mode === RETRIEVE_TYPE.multiWay && payload.multiple_retrieval_config?.reranking_mode === RerankingModeEnum.RerankingModel && !payload.multiple_retrieval_config?.reranking_model?.provider)
+ if (!errorMessages && payload.retrieval_mode === RETRIEVE_TYPE.multiWay && payload.multiple_retrieval_config?.reranking_mode === RerankingModeEnum.RerankingModel && !payload.multiple_retrieval_config?.reranking_model?.provider && payload.multiple_retrieval_config?.reranking_enable)
errorMessages = t(`${i18nPrefix}.errorMsg.fieldRequired`, { field: t(`${i18nPrefix}.errorMsg.fields.rerankModel`) })
if (!errorMessages && payload.retrieval_mode === RETRIEVE_TYPE.oneWay && !payload.single_retrieval_config?.model?.provider)
diff --git a/web/app/components/workflow/nodes/variable-assigner/components/node-group-item.tsx b/web/app/components/workflow/nodes/variable-assigner/components/node-group-item.tsx
index 5f87dd6fecf3d1..e0c15e396bbd3c 100644
--- a/web/app/components/workflow/nodes/variable-assigner/components/node-group-item.tsx
+++ b/web/app/components/workflow/nodes/variable-assigner/components/node-group-item.tsx
@@ -19,7 +19,7 @@ import {
import { filterVar } from '../utils'
import AddVariable from './add-variable'
import NodeVariableItem from './node-variable-item'
-import { isENV, isSystemVar } from '@/app/components/workflow/nodes/_base/components/variable/utils'
+import { isConversationVar, isENV, isSystemVar } from '@/app/components/workflow/nodes/_base/components/variable/utils'
import cn from '@/utils/classnames'
const i18nPrefix = 'workflow.nodes.variableAssigner'
@@ -124,6 +124,8 @@ const NodeGroupItem = ({
!!item.variables.length && item.variables.map((variable = [], index) => {
const isSystem = isSystemVar(variable)
const isEnv = isENV(variable)
+ const isChatVar = isConversationVar(variable)
+
const node = isSystem ? nodes.find(node => node.data.type === BlockEnum.Start) : nodes.find(node => node.id === variable[0])
const varName = isSystem ? `sys.${variable[variable.length - 1]}` : variable.slice(1).join('.')
@@ -131,6 +133,7 @@ const NodeGroupItem = ({
{
return (
- {!isEnv && (
+ {!isEnv && !isChatVar && (
)}
- {!isEnv &&
}
+ {!isEnv && !isChatVar &&
}
{isEnv &&
}
-
{varName}
+ {isChatVar &&
}
+
{varName}
)
diff --git a/web/app/components/workflow/nodes/variable-assigner/hooks.ts b/web/app/components/workflow/nodes/variable-assigner/hooks.ts
index d9ae25416f7141..0e5e10c741b3f4 100644
--- a/web/app/components/workflow/nodes/variable-assigner/hooks.ts
+++ b/web/app/components/workflow/nodes/variable-assigner/hooks.ts
@@ -143,6 +143,7 @@ export const useGetAvailableVars = () => {
beforeNodes: uniqBy(availableNodes, 'id').filter(node => node.id !== nodeId),
isChatMode,
hideEnv,
+ hideChatVar: hideEnv,
filterVar,
})
.map(node => ({
diff --git a/web/app/components/workflow/panel/chat-variable-panel/components/array-value-list.tsx b/web/app/components/workflow/panel/chat-variable-panel/components/array-value-list.tsx
new file mode 100644
index 00000000000000..a040f2fd8e38f4
--- /dev/null
+++ b/web/app/components/workflow/panel/chat-variable-panel/components/array-value-list.tsx
@@ -0,0 +1,72 @@
+'use client'
+import type { FC } from 'react'
+import React, { useCallback } from 'react'
+import { useTranslation } from 'react-i18next'
+import { RiAddLine } from '@remixicon/react'
+import produce from 'immer'
+import RemoveButton from '@/app/components/workflow/nodes/_base/components/remove-button'
+import Button from '@/app/components/base/button'
+
+type Props = {
+ isString: boolean
+ list: any[]
+ onChange: (list: any[]) => void
+}
+
+const ArrayValueList: FC
= ({
+ isString = true,
+ list,
+ onChange,
+}) => {
+ const { t } = useTranslation()
+
+ const handleNameChange = useCallback((index: number) => {
+ return (e: React.ChangeEvent) => {
+ const newList = produce(list, (draft: any[]) => {
+ draft[index] = isString ? e.target.value : Number(e.target.value)
+ })
+ onChange(newList)
+ }
+ }, [isString, list, onChange])
+
+ const handleItemRemove = useCallback((index: number) => {
+ return () => {
+ const newList = produce(list, (draft) => {
+ draft.splice(index, 1)
+ })
+ onChange(newList)
+ }
+ }, [list, onChange])
+
+ const handleItemAdd = useCallback(() => {
+ const newList = produce(list, (draft: any[]) => {
+ draft.push(undefined)
+ })
+ onChange(newList)
+ }, [list, onChange])
+
+ return (
+
+ {list.map((item, index) => (
+
+
+
+
+ ))}
+
+
+ {t('workflow.chatVariable.modal.addArrayValue')}
+
+
+ )
+}
+export default React.memo(ArrayValueList)
diff --git a/web/app/components/workflow/panel/chat-variable-panel/components/object-value-item.tsx b/web/app/components/workflow/panel/chat-variable-panel/components/object-value-item.tsx
new file mode 100644
index 00000000000000..6bbdeae08bb96a
--- /dev/null
+++ b/web/app/components/workflow/panel/chat-variable-panel/components/object-value-item.tsx
@@ -0,0 +1,135 @@
+'use client'
+import type { FC } from 'react'
+import React, { useCallback, useState } from 'react'
+import { useTranslation } from 'react-i18next'
+import produce from 'immer'
+import { useContext } from 'use-context-selector'
+import { ToastContext } from '@/app/components/base/toast'
+import VariableTypeSelector from '@/app/components/workflow/panel/chat-variable-panel/components/variable-type-select'
+import RemoveButton from '@/app/components/workflow/nodes/_base/components/remove-button'
+import { ChatVarType } from '@/app/components/workflow/panel/chat-variable-panel/type'
+
+type Props = {
+ index: number
+ list: any[]
+ onChange: (list: any[]) => void
+}
+
+const typeList = [
+ ChatVarType.String,
+ ChatVarType.Number,
+]
+
+export const DEFAULT_OBJECT_VALUE = {
+ key: '',
+ type: ChatVarType.String,
+ value: undefined,
+}
+
+const ObjectValueItem: FC = ({
+ index,
+ list,
+ onChange,
+}) => {
+ const { t } = useTranslation()
+ const { notify } = useContext(ToastContext)
+ const [isFocus, setIsFocus] = useState(false)
+
+ const handleKeyChange = useCallback((index: number) => {
+ return (e: React.ChangeEvent) => {
+ const newList = produce(list, (draft: any[]) => {
+ if (!/^[a-zA-Z0-9_]+$/.test(e.target.value))
+ return notify({ type: 'error', message: 'key is can only contain letters, numbers and underscores' })
+ draft[index].key = e.target.value
+ })
+ onChange(newList)
+ }
+ }, [list, notify, onChange])
+
+ const handleTypeChange = useCallback((index: number) => {
+ return (type: ChatVarType) => {
+ const newList = produce(list, (draft) => {
+ draft[index].type = type
+ if (type === ChatVarType.Number)
+ draft[index].value = isNaN(Number(draft[index].value)) ? undefined : Number(draft[index].value)
+ else
+ draft[index].value = draft[index].value ? String(draft[index].value) : undefined
+ })
+ onChange(newList)
+ }
+ }, [list, onChange])
+
+ const handleValueChange = useCallback((index: number) => {
+ return (e: React.ChangeEvent) => {
+ const newList = produce(list, (draft: any[]) => {
+ draft[index].value = draft[index].type === ChatVarType.String ? e.target.value : isNaN(Number(e.target.value)) ? undefined : Number(e.target.value)
+ })
+ onChange(newList)
+ }
+ }, [list, onChange])
+
+ const handleItemRemove = useCallback((index: number) => {
+ return () => {
+ const newList = produce(list, (draft) => {
+ draft.splice(index, 1)
+ })
+ onChange(newList)
+ }
+ }, [list, onChange])
+
+ const handleItemAdd = useCallback(() => {
+ const newList = produce(list, (draft: any[]) => {
+ draft.push(DEFAULT_OBJECT_VALUE)
+ })
+ onChange(newList)
+ }, [list, onChange])
+
+ const handleFocusChange = useCallback(() => {
+ setIsFocus(true)
+ if (index === list.length - 1)
+ handleItemAdd()
+ }, [handleItemAdd, index, list.length])
+
+ return (
+
+ {/* Key */}
+
+
+
+ {/* Type */}
+
+
+
+ {/* Value */}
+
+ handleFocusChange()}
+ onBlur={() => setIsFocus(false)}
+ type={list[index].type === ChatVarType.Number ? 'number' : 'text'}
+ />
+ {list.length > 1 && !isFocus && (
+
+ )}
+
+
+ )
+}
+export default React.memo(ObjectValueItem)
diff --git a/web/app/components/workflow/panel/chat-variable-panel/components/object-value-list.tsx b/web/app/components/workflow/panel/chat-variable-panel/components/object-value-list.tsx
new file mode 100644
index 00000000000000..ec287accba0b84
--- /dev/null
+++ b/web/app/components/workflow/panel/chat-variable-panel/components/object-value-list.tsx
@@ -0,0 +1,36 @@
+'use client'
+import type { FC } from 'react'
+import React from 'react'
+import { useTranslation } from 'react-i18next'
+import ObjectValueItem from '@/app/components/workflow/panel/chat-variable-panel/components/object-value-item'
+
+type Props = {
+ list: any[]
+ onChange: (list: any[]) => void
+}
+
+const ObjectValueList: FC = ({
+ list,
+ onChange,
+}) => {
+ const { t } = useTranslation()
+
+ return (
+
+
+
{t('workflow.chatVariable.modal.objectKey')}
+
{t('workflow.chatVariable.modal.objectType')}
+
{t('workflow.chatVariable.modal.objectValue')}
+
+ {list.map((item, index) => (
+
+ ))}
+
+ )
+}
+export default React.memo(ObjectValueList)
diff --git a/web/app/components/workflow/panel/chat-variable-panel/components/variable-item.tsx b/web/app/components/workflow/panel/chat-variable-panel/components/variable-item.tsx
new file mode 100644
index 00000000000000..a1a7c9dc3d3460
--- /dev/null
+++ b/web/app/components/workflow/panel/chat-variable-panel/components/variable-item.tsx
@@ -0,0 +1,49 @@
+import { memo, useState } from 'react'
+import { capitalize } from 'lodash-es'
+import { RiDeleteBinLine, RiEditLine } from '@remixicon/react'
+import { BubbleX } from '@/app/components/base/icons/src/vender/line/others'
+import type { ConversationVariable } from '@/app/components/workflow/types'
+import cn from '@/utils/classnames'
+
+type VariableItemProps = {
+ item: ConversationVariable
+ onEdit: (item: ConversationVariable) => void
+ onDelete: (item: ConversationVariable) => void
+}
+
+const VariableItem = ({
+ item,
+ onEdit,
+ onDelete,
+}: VariableItemProps) => {
+ const [destructive, setDestructive] = useState(false)
+ return (
+
+
+
+
+
{item.name}
+
{capitalize(item.value_type)}
+
+
+
+ onEdit(item)}/>
+
+
setDestructive(true)}
+ onMouseOut={() => setDestructive(false)}
+ >
+ onDelete(item)}/>
+
+
+
+
{item.description}
+
+ )
+}
+
+export default memo(VariableItem)
diff --git a/web/app/components/workflow/panel/chat-variable-panel/components/variable-modal-trigger.tsx b/web/app/components/workflow/panel/chat-variable-panel/components/variable-modal-trigger.tsx
new file mode 100644
index 00000000000000..35d5254327b7f3
--- /dev/null
+++ b/web/app/components/workflow/panel/chat-variable-panel/components/variable-modal-trigger.tsx
@@ -0,0 +1,69 @@
+'use client'
+import React from 'react'
+import { useTranslation } from 'react-i18next'
+import { RiAddLine } from '@remixicon/react'
+import Button from '@/app/components/base/button'
+import VariableModal from '@/app/components/workflow/panel/chat-variable-panel/components/variable-modal'
+import {
+ PortalToFollowElem,
+ PortalToFollowElemContent,
+ PortalToFollowElemTrigger,
+} from '@/app/components/base/portal-to-follow-elem'
+import type { ConversationVariable } from '@/app/components/workflow/types'
+
+type Props = {
+ open: boolean
+ setOpen: (value: React.SetStateAction) => void
+ showTip: boolean
+ chatVar?: ConversationVariable
+ onClose: () => void
+ onSave: (env: ConversationVariable) => void
+}
+
+const VariableModalTrigger = ({
+ open,
+ setOpen,
+ showTip,
+ chatVar,
+ onClose,
+ onSave,
+}: Props) => {
+ const { t } = useTranslation()
+
+ return (
+ {
+ setOpen(v => !v)
+ open && onClose()
+ }}
+ placement='left-start'
+ offset={{
+ mainAxis: 8,
+ alignmentAxis: showTip ? -278 : -48,
+ }}
+ >
+ {
+ setOpen(v => !v)
+ open && onClose()
+ }}>
+
+
+ {t('workflow.chatVariable.button')}
+
+
+
+ {
+ onClose()
+ setOpen(false)
+ }}
+ />
+
+
+ )
+}
+
+export default VariableModalTrigger
diff --git a/web/app/components/workflow/panel/chat-variable-panel/components/variable-modal.tsx b/web/app/components/workflow/panel/chat-variable-panel/components/variable-modal.tsx
new file mode 100644
index 00000000000000..289e29d5921d02
--- /dev/null
+++ b/web/app/components/workflow/panel/chat-variable-panel/components/variable-modal.tsx
@@ -0,0 +1,394 @@
+import React, { useCallback, useEffect, useMemo } from 'react'
+import { useTranslation } from 'react-i18next'
+import { useContext } from 'use-context-selector'
+import { v4 as uuid4 } from 'uuid'
+import { RiCloseLine, RiDraftLine, RiInputField } from '@remixicon/react'
+import VariableTypeSelector from '@/app/components/workflow/panel/chat-variable-panel/components/variable-type-select'
+import ObjectValueList from '@/app/components/workflow/panel/chat-variable-panel/components/object-value-list'
+import { DEFAULT_OBJECT_VALUE } from '@/app/components/workflow/panel/chat-variable-panel/components/object-value-item'
+import ArrayValueList from '@/app/components/workflow/panel/chat-variable-panel/components/array-value-list'
+import Button from '@/app/components/base/button'
+import CodeEditor from '@/app/components/workflow/nodes/_base/components/editor/code-editor'
+import { ToastContext } from '@/app/components/base/toast'
+import { useStore } from '@/app/components/workflow/store'
+import type { ConversationVariable } from '@/app/components/workflow/types'
+import { CodeLanguage } from '@/app/components/workflow/nodes/code/types'
+import { ChatVarType } from '@/app/components/workflow/panel/chat-variable-panel/type'
+import cn from '@/utils/classnames'
+
+export type ModalPropsType = {
+ chatVar?: ConversationVariable
+ onClose: () => void
+ onSave: (chatVar: ConversationVariable) => void
+}
+
+type ObjectValueItem = {
+ key: string
+ type: ChatVarType
+ value: string | number | undefined
+}
+
+const typeList = [
+ ChatVarType.String,
+ ChatVarType.Number,
+ ChatVarType.Object,
+ ChatVarType.ArrayString,
+ ChatVarType.ArrayNumber,
+ ChatVarType.ArrayObject,
+]
+
+const objectPlaceholder = `# example
+# {
+# "name": "ray",
+# "age": 20
+# }`
+const arrayStringPlaceholder = `# example
+# [
+# "value1",
+# "value2"
+# ]`
+const arrayNumberPlaceholder = `# example
+# [
+# 100,
+# 200
+# ]`
+const arrayObjectPlaceholder = `# example
+# [
+# {
+# "name": "ray",
+# "age": 20
+# },
+# {
+# "name": "lily",
+# "age": 18
+# }
+# ]`
+
+const ChatVariableModal = ({
+ chatVar,
+ onClose,
+ onSave,
+}: ModalPropsType) => {
+ const { t } = useTranslation()
+ const { notify } = useContext(ToastContext)
+ const varList = useStore(s => s.conversationVariables)
+ const [name, setName] = React.useState('')
+ const [type, setType] = React.useState(ChatVarType.String)
+ const [value, setValue] = React.useState()
+ const [objectValue, setObjectValue] = React.useState([DEFAULT_OBJECT_VALUE])
+ const [editorContent, setEditorContent] = React.useState()
+ const [editInJSON, setEditInJSON] = React.useState(false)
+ const [des, setDes] = React.useState('')
+
+ const editorMinHeight = useMemo(() => {
+ if (type === ChatVarType.ArrayObject)
+ return '240px'
+ return '120px'
+ }, [type])
+ const placeholder = useMemo(() => {
+ if (type === ChatVarType.ArrayString)
+ return arrayStringPlaceholder
+ if (type === ChatVarType.ArrayNumber)
+ return arrayNumberPlaceholder
+ if (type === ChatVarType.ArrayObject)
+ return arrayObjectPlaceholder
+ return objectPlaceholder
+ }, [type])
+ const getObjectValue = useCallback(() => {
+ if (!chatVar)
+ return [DEFAULT_OBJECT_VALUE]
+ return Object.keys(chatVar.value).map((key) => {
+ return {
+ key,
+ type: typeof chatVar.value[key] === 'string' ? ChatVarType.String : ChatVarType.Number,
+ value: chatVar.value[key],
+ }
+ })
+ }, [chatVar])
+ const formatValueFromObject = useCallback((list: ObjectValueItem[]) => {
+ return list.reduce((acc: any, curr) => {
+ if (curr.key)
+ acc[curr.key] = curr.value || null
+ return acc
+ }, {})
+ }, [])
+
+ const formatValue = (value: any) => {
+ switch (type) {
+ case ChatVarType.String:
+ return value || ''
+ case ChatVarType.Number:
+ return value || 0
+ case ChatVarType.Object:
+ return formatValueFromObject(objectValue)
+ case ChatVarType.ArrayString:
+ case ChatVarType.ArrayNumber:
+ case ChatVarType.ArrayObject:
+ return value?.filter(Boolean) || []
+ }
+ }
+
+ const handleNameChange = (v: string) => {
+ if (!v)
+ return setName('')
+ if (!/^[a-zA-Z0-9_]+$/.test(v))
+ return notify({ type: 'error', message: 'name is can only contain letters, numbers and underscores' })
+ if (/^[0-9]/.test(v))
+ return notify({ type: 'error', message: 'name can not start with a number' })
+ setName(v)
+ }
+
+ const handleTypeChange = (v: ChatVarType) => {
+ setValue(undefined)
+ setEditorContent(undefined)
+ if (v === ChatVarType.ArrayObject)
+ setEditInJSON(true)
+ if (v === ChatVarType.String || v === ChatVarType.Number || v === ChatVarType.Object)
+ setEditInJSON(false)
+ setType(v)
+ }
+
+ const handleEditorChange = (editInJSON: boolean) => {
+ if (type === ChatVarType.Object) {
+ if (editInJSON) {
+ const newValue = !objectValue[0].key ? undefined : formatValueFromObject(objectValue)
+ setValue(newValue)
+ setEditorContent(JSON.stringify(newValue))
+ }
+ else {
+ if (!editorContent) {
+ setValue(undefined)
+ setObjectValue([DEFAULT_OBJECT_VALUE])
+ }
+ else {
+ try {
+ const newValue = JSON.parse(editorContent)
+ setValue(newValue)
+ const newObjectValue = Object.keys(newValue).map((key) => {
+ return {
+ key,
+ type: typeof newValue[key] === 'string' ? ChatVarType.String : ChatVarType.Number,
+ value: newValue[key],
+ }
+ })
+ setObjectValue(newObjectValue)
+ }
+ catch (e) {
+ // ignore JSON.parse errors
+ }
+ }
+ }
+ }
+ if (type === ChatVarType.ArrayString || type === ChatVarType.ArrayNumber) {
+ if (editInJSON) {
+ const newValue = (value?.length && value.filter(Boolean).length) ? value.filter(Boolean) : undefined
+ setValue(newValue)
+ if (!editorContent)
+ setEditorContent(JSON.stringify(newValue))
+ }
+ else {
+ setValue(value?.length ? value : [undefined])
+ }
+ }
+ setEditInJSON(editInJSON)
+ }
+
+ const handleEditorValueChange = (content: string) => {
+ if (!content) {
+ setEditorContent(content)
+ return setValue(undefined)
+ }
+ else {
+ setEditorContent(content)
+ try {
+ const newValue = JSON.parse(content)
+ setValue(newValue)
+ }
+ catch (e) {
+ // ignore JSON.parse errors
+ }
+ }
+ }
+
+ const handleSave = () => {
+ if (!name)
+ return notify({ type: 'error', message: 'name can not be empty' })
+ if (!chatVar && varList.some(chatVar => chatVar.name === name))
+ return notify({ type: 'error', message: 'name is existed' })
+ // if (type !== ChatVarType.Object && !value)
+ // return notify({ type: 'error', message: 'value can not be empty' })
+ if (type === ChatVarType.Object && objectValue.some(item => !item.key && !!item.value))
+ return notify({ type: 'error', message: 'object key can not be empty' })
+
+ onSave({
+ id: chatVar ? chatVar.id : uuid4(),
+ name,
+ value_type: type,
+ value: formatValue(value),
+ description: des,
+ })
+ onClose()
+ }
+
+ useEffect(() => {
+ if (chatVar) {
+ setName(chatVar.name)
+ setType(chatVar.value_type)
+ setValue(chatVar.value)
+ setDes(chatVar.description)
+ setObjectValue(getObjectValue())
+ if (chatVar.value_type === ChatVarType.ArrayObject) {
+ setEditorContent(JSON.stringify(chatVar.value))
+ setEditInJSON(true)
+ }
+ else {
+ setEditInJSON(false)
+ }
+ }
+ }, [chatVar, getObjectValue])
+
+ return (
+
+
+ {!chatVar ? t('workflow.chatVariable.modal.title') : t('workflow.chatVariable.modal.editTitle')}
+
+
+
+ {/* name */}
+
+
{t('workflow.chatVariable.modal.name')}
+
+ handleNameChange(e.target.value)}
+ type='text'
+ />
+
+
+ {/* type */}
+
+
{t('workflow.chatVariable.modal.type')}
+
+
+
+
+ {/* default value */}
+
+
+
{t('workflow.chatVariable.modal.value')}
+ {(type === ChatVarType.ArrayString || type === ChatVarType.ArrayNumber) && (
+
handleEditorChange(!editInJSON)}
+ >
+ {editInJSON ? : }
+ {editInJSON ? t('workflow.chatVariable.modal.oneByOne') : t('workflow.chatVariable.modal.editInJSON')}
+
+ )}
+ {type === ChatVarType.Object && (
+
handleEditorChange(!editInJSON)}
+ >
+ {editInJSON ? : }
+ {editInJSON ? t('workflow.chatVariable.modal.editInForm') : t('workflow.chatVariable.modal.editInJSON')}
+
+ )}
+
+
+ {type === ChatVarType.String && (
+
setValue(e.target.value)}
+ />
+ )}
+ {type === ChatVarType.Number && (
+
setValue(Number(e.target.value))}
+ type='number'
+ />
+ )}
+ {type === ChatVarType.Object && !editInJSON && (
+
+ )}
+ {type === ChatVarType.ArrayString && !editInJSON && (
+
+ )}
+ {type === ChatVarType.ArrayNumber && !editInJSON && (
+
+ )}
+ {editInJSON && (
+
+ {placeholder}
}
+ onChange={handleEditorValueChange}
+ />
+
+ )}
+
+
+ {/* description */}
+
+
{t('workflow.chatVariable.modal.description')}
+
+
+
+
+
+
+ {t('common.operation.cancel')}
+ {t('common.operation.save')}
+
+
+
+ )
+}
+
+export default ChatVariableModal
diff --git a/web/app/components/workflow/panel/chat-variable-panel/components/variable-type-select.tsx b/web/app/components/workflow/panel/chat-variable-panel/components/variable-type-select.tsx
new file mode 100644
index 00000000000000..94a319514aa727
--- /dev/null
+++ b/web/app/components/workflow/panel/chat-variable-panel/components/variable-type-select.tsx
@@ -0,0 +1,66 @@
+'use client'
+import React, { useState } from 'react'
+import { RiArrowDownSLine, RiCheckLine } from '@remixicon/react'
+import {
+ PortalToFollowElem,
+ PortalToFollowElemContent,
+ PortalToFollowElemTrigger,
+} from '@/app/components/base/portal-to-follow-elem'
+import cn from '@/utils/classnames'
+
+type Props = {
+ inCell?: boolean
+ value?: any
+ list: any
+ onSelect: (value: any) => void
+ popupClassName?: string
+}
+
+const VariableTypeSelector = ({
+ inCell = false,
+ value,
+ list,
+ onSelect,
+ popupClassName,
+}: Props) => {
+ const [open, setOpen] = useState(false)
+
+ return (
+
setOpen(v => !v)}
+ placement='bottom'
+ >
+ setOpen(v => !v)}>
+
+
+
+
+ {list.map((item: any) => (
+
{
+ onSelect(item)
+ setOpen(false)
+ }}>
+
{item}
+ {value === item &&
}
+
+ ))}
+
+
+
+ )
+}
+
+export default VariableTypeSelector
diff --git a/web/app/components/workflow/panel/chat-variable-panel/index.tsx b/web/app/components/workflow/panel/chat-variable-panel/index.tsx
new file mode 100644
index 00000000000000..8ae17a4db344b4
--- /dev/null
+++ b/web/app/components/workflow/panel/chat-variable-panel/index.tsx
@@ -0,0 +1,202 @@
+import {
+ memo,
+ useCallback,
+ useState,
+} from 'react'
+import { useContext } from 'use-context-selector'
+import {
+ useStoreApi,
+} from 'reactflow'
+import { RiBookOpenLine, RiCloseLine } from '@remixicon/react'
+import { useTranslation } from 'react-i18next'
+import { useStore } from '@/app/components/workflow/store'
+import ActionButton, { ActionButtonState } from '@/app/components/base/action-button'
+import { BubbleX, LongArrowLeft, LongArrowRight } from '@/app/components/base/icons/src/vender/line/others'
+import BlockIcon from '@/app/components/workflow/block-icon'
+import VariableModalTrigger from '@/app/components/workflow/panel/chat-variable-panel/components/variable-modal-trigger'
+import VariableItem from '@/app/components/workflow/panel/chat-variable-panel/components/variable-item'
+import RemoveEffectVarConfirm from '@/app/components/workflow/nodes/_base/components/remove-effect-var-confirm'
+import type {
+ ConversationVariable,
+} from '@/app/components/workflow/types'
+import { findUsedVarNodes, updateNodeVars } from '@/app/components/workflow/nodes/_base/components/variable/utils'
+import { useNodesSyncDraft } from '@/app/components/workflow/hooks/use-nodes-sync-draft'
+import { BlockEnum } from '@/app/components/workflow/types'
+import I18n from '@/context/i18n'
+import { LanguagesSupported } from '@/i18n/language'
+import cn from '@/utils/classnames'
+
+const ChatVariablePanel = () => {
+ const { t } = useTranslation()
+ const { locale } = useContext(I18n)
+ const store = useStoreApi()
+ const setShowChatVariablePanel = useStore(s => s.setShowChatVariablePanel)
+ const varList = useStore(s => s.conversationVariables) as ConversationVariable[]
+ const updateChatVarList = useStore(s => s.setConversationVariables)
+ const { doSyncWorkflowDraft } = useNodesSyncDraft()
+
+ const [showTip, setShowTip] = useState(true)
+ const [showVariableModal, setShowVariableModal] = useState(false)
+ const [currentVar, setCurrentVar] = useState
()
+
+ const [showRemoveVarConfirm, setShowRemoveConfirm] = useState(false)
+ const [cacheForDelete, setCacheForDelete] = useState()
+
+ const getEffectedNodes = useCallback((chatVar: ConversationVariable) => {
+ const { getNodes } = store.getState()
+ const allNodes = getNodes()
+ return findUsedVarNodes(
+ ['conversation', chatVar.name],
+ allNodes,
+ )
+ }, [store])
+
+ const removeUsedVarInNodes = useCallback((chatVar: ConversationVariable) => {
+ const { getNodes, setNodes } = store.getState()
+ const effectedNodes = getEffectedNodes(chatVar)
+ const newNodes = getNodes().map((node) => {
+ if (effectedNodes.find(n => n.id === node.id))
+ return updateNodeVars(node, ['conversation', chatVar.name], [])
+
+ return node
+ })
+ setNodes(newNodes)
+ }, [getEffectedNodes, store])
+
+ const handleEdit = (chatVar: ConversationVariable) => {
+ setCurrentVar(chatVar)
+ setShowVariableModal(true)
+ }
+
+ const handleDelete = useCallback((chatVar: ConversationVariable) => {
+ removeUsedVarInNodes(chatVar)
+ updateChatVarList(varList.filter(v => v.id !== chatVar.id))
+ setCacheForDelete(undefined)
+ setShowRemoveConfirm(false)
+ doSyncWorkflowDraft()
+ }, [doSyncWorkflowDraft, removeUsedVarInNodes, updateChatVarList, varList])
+
+ const deleteCheck = useCallback((chatVar: ConversationVariable) => {
+ const effectedNodes = getEffectedNodes(chatVar)
+ if (effectedNodes.length > 0) {
+ setCacheForDelete(chatVar)
+ setShowRemoveConfirm(true)
+ }
+ else {
+ handleDelete(chatVar)
+ }
+ }, [getEffectedNodes, handleDelete])
+
+ const handleSave = useCallback(async (chatVar: ConversationVariable) => {
+ // add chatVar
+ if (!currentVar) {
+ const newList = [chatVar, ...varList]
+ updateChatVarList(newList)
+ doSyncWorkflowDraft()
+ return
+ }
+ // edit chatVar
+ const newList = varList.map(v => v.id === currentVar.id ? chatVar : v)
+ updateChatVarList(newList)
+ // side effects of rename env
+ if (currentVar.name !== chatVar.name) {
+ const { getNodes, setNodes } = store.getState()
+ const effectedNodes = getEffectedNodes(currentVar)
+ const newNodes = getNodes().map((node) => {
+ if (effectedNodes.find(n => n.id === node.id))
+ return updateNodeVars(node, ['conversation', currentVar.name], ['conversation', chatVar.name])
+
+ return node
+ })
+ setNodes(newNodes)
+ }
+ doSyncWorkflowDraft()
+ }, [currentVar, doSyncWorkflowDraft, getEffectedNodes, store, updateChatVarList, varList])
+
+ return (
+
+
+ {t('workflow.chatVariable.panelTitle')}
+
+
setShowTip(!showTip)}>
+
+
+
setShowChatVariablePanel(false)}
+ >
+
+
+
+
+ {showTip && (
+
+
+
TIPS
+
+
+
+
+
conversation_var
+
String
+
+
+
+
+
+
{t('workflow.blocks.assigner')}
+
+
+
+
+
{t('workflow.blocks.llm')}
+
+
+
+
+
+
+ )}
+
+ setCurrentVar(undefined)}
+ />
+
+
+ {varList.map(chatVar => (
+
+ ))}
+
+
setShowRemoveConfirm(false)}
+ onConfirm={() => cacheForDelete && handleDelete(cacheForDelete)}
+ />
+
+ )
+}
+
+export default memo(ChatVariablePanel)
diff --git a/web/app/components/workflow/panel/chat-variable-panel/type.ts b/web/app/components/workflow/panel/chat-variable-panel/type.ts
new file mode 100644
index 00000000000000..2a4e776463357a
--- /dev/null
+++ b/web/app/components/workflow/panel/chat-variable-panel/type.ts
@@ -0,0 +1,8 @@
+export enum ChatVarType {
+ Number = 'number',
+ String = 'string',
+ Object = 'object',
+ ArrayString = 'array[string]',
+ ArrayNumber = 'array[number]',
+ ArrayObject = 'array[object]',
+}
diff --git a/web/app/components/workflow/panel/debug-and-preview/chat-wrapper.tsx b/web/app/components/workflow/panel/debug-and-preview/chat-wrapper.tsx
index 015f85d1010383..465594003757ee 100644
--- a/web/app/components/workflow/panel/debug-and-preview/chat-wrapper.tsx
+++ b/web/app/components/workflow/panel/debug-and-preview/chat-wrapper.tsx
@@ -14,6 +14,7 @@ import {
import type { StartNodeType } from '../../nodes/start/types'
import Empty from './empty'
import UserInput from './user-input'
+import ConversationVariableModal from './conversation-variable-modal'
import { useChat } from './hooks'
import type { ChatWrapperRefType } from './index'
import Chat from '@/app/components/base/chat/chat'
@@ -25,7 +26,13 @@ import {
} from '@/service/debug'
import { useStore as useAppStore } from '@/app/components/app/store'
-const ChatWrapper = forwardRef((_, ref) => {
+type ChatWrapperProps = {
+ showConversationVariableModal: boolean
+ onConversationModalHide: () => void
+ showInputsFieldsPanel: boolean
+}
+
+const ChatWrapper = forwardRef(({ showConversationVariableModal, onConversationModalHide, showInputsFieldsPanel }, ref) => {
const nodes = useNodes()
const startNode = nodes.find(node => node.data.type === BlockEnum.Start)
const startVariables = startNode?.data.variables
@@ -87,33 +94,41 @@ const ChatWrapper = forwardRef((_, ref) => {
}, [handleRestart])
return (
-
-
- {
- !chatList.length && (
-
- )
- }
- >
+ <>
+
+ {showInputsFieldsPanel && }
+ {
+ !chatList.length && (
+
+ )
+ }
+ >
+ )}
+ suggestedQuestions={suggestedQuestions}
+ showPromptLog
+ chatAnswerContainerInner='!pr-2'
+ />
+ {showConversationVariableModal && (
+
)}
- suggestedQuestions={suggestedQuestions}
- showPromptLog
- chatAnswerContainerInner='!pr-2'
- />
+ >
)
})
diff --git a/web/app/components/workflow/panel/debug-and-preview/conversation-variable-modal.tsx b/web/app/components/workflow/panel/debug-and-preview/conversation-variable-modal.tsx
new file mode 100644
index 00000000000000..a2faa14d89dba1
--- /dev/null
+++ b/web/app/components/workflow/panel/debug-and-preview/conversation-variable-modal.tsx
@@ -0,0 +1,155 @@
+'use client'
+import React, { useCallback } from 'react'
+import { useMount } from 'ahooks'
+import { useTranslation } from 'react-i18next'
+import { capitalize } from 'lodash-es'
+import copy from 'copy-to-clipboard'
+import { RiCloseLine } from '@remixicon/react'
+import Modal from '@/app/components/base/modal'
+import { BubbleX } from '@/app/components/base/icons/src/vender/line/others'
+import CodeEditor from '@/app/components/workflow/nodes/_base/components/editor/code-editor'
+import {
+ Clipboard,
+ ClipboardCheck,
+} from '@/app/components/base/icons/src/vender/line/files'
+import { useStore } from '@/app/components/workflow/store'
+import type {
+ ConversationVariable,
+} from '@/app/components/workflow/types'
+import { ChatVarType } from '@/app/components/workflow/panel/chat-variable-panel/type'
+import { CodeLanguage } from '@/app/components/workflow/nodes/code/types'
+import useTimestamp from '@/hooks/use-timestamp'
+import { fetchCurrentValueOfConversationVariable } from '@/service/workflow'
+import cn from '@/utils/classnames'
+
+export type Props = {
+ conversationID: string
+ onHide: () => void
+}
+
+const ConversationVariableModal = ({
+ conversationID,
+ onHide,
+}: Props) => {
+ const { t } = useTranslation()
+ const { formatTime } = useTimestamp()
+ const varList = useStore(s => s.conversationVariables) as ConversationVariable[]
+ const appID = useStore(s => s.appId)
+ const [currentVar, setCurrentVar] = React.useState(varList[0])
+ const [latestValueMap, setLatestValueMap] = React.useState>({})
+ const [latestValueTimestampMap, setLatestValueTimestampMap] = React.useState>({})
+
+ const getChatVarLatestValues = useCallback(async () => {
+ if (conversationID && varList.length > 0) {
+ const res = await fetchCurrentValueOfConversationVariable({
+ url: `/apps/${appID}/conversation-variables`,
+ params: { conversation_id: conversationID },
+ })
+ if (res.data.length > 0) {
+ const valueMap = res.data.reduce((acc: any, cur) => {
+ acc[cur.id] = cur.value
+ return acc
+ }, {})
+ setLatestValueMap(valueMap)
+ const timestampMap = res.data.reduce((acc: any, cur) => {
+ acc[cur.id] = cur.updated_at
+ return acc
+ }, {})
+ setLatestValueTimestampMap(timestampMap)
+ }
+ }
+ }, [appID, conversationID, varList.length])
+
+ const [isCopied, setIsCopied] = React.useState(false)
+ const handleCopy = useCallback(() => {
+ copy(currentVar.value)
+ setIsCopied(true)
+ setTimeout(() => {
+ setIsCopied(false)
+ }, 2000)
+ }, [currentVar.value])
+
+ useMount(() => {
+ getChatVarLatestValues()
+ })
+
+ return (
+ { }}
+ className={cn('w-[920px] max-w-[920px] h-[640px] p-0')}
+ >
+
+
+
+
+ {/* LEFT */}
+
+
{t('workflow.chatVariable.panelTitle')}
+
+ {varList.map(chatVar => (
+
setCurrentVar(chatVar)}>
+
+
{chatVar.name}
+
+ ))}
+
+
+ {/* RIGHT */}
+
+
+
+
{currentVar.name}
+
{capitalize(currentVar.value_type)}
+
+
+
+
+
{t('workflow.chatVariable.storedContent').toLocaleUpperCase()}
+
+ {latestValueTimestampMap[currentVar.id] && (
+
{t('workflow.chatVariable.updatedAt')}{formatTime(latestValueTimestampMap[currentVar.id], t('appLog.dateTimeFormat') as string)}
+ )}
+
+
+ {currentVar.value_type !== ChatVarType.Number && currentVar.value_type !== ChatVarType.String && (
+
+
+
JSON
+
+ {!isCopied
+ ? (
+
+ )
+ : (
+
+ )
+ }
+
+
+
+
+
+
+ )}
+ {(currentVar.value_type === ChatVarType.Number || currentVar.value_type === ChatVarType.String) && (
+
{latestValueMap[currentVar.id] || ''}
+ )}
+
+
+
+
+
+ )
+}
+
+export default ConversationVariableModal
diff --git a/web/app/components/workflow/panel/debug-and-preview/index.tsx b/web/app/components/workflow/panel/debug-and-preview/index.tsx
index 72a601bed9bde0..1f94b4fbc3b112 100644
--- a/web/app/components/workflow/panel/debug-and-preview/index.tsx
+++ b/web/app/components/workflow/panel/debug-and-preview/index.tsx
@@ -1,19 +1,26 @@
import {
memo,
useRef,
+ useState,
} from 'react'
import { useKeyPress } from 'ahooks'
-import { RiCloseLine } from '@remixicon/react'
+import { RiCloseLine, RiEqualizer2Line } from '@remixicon/react'
import { useTranslation } from 'react-i18next'
+import { useNodes } from 'reactflow'
import {
useEdgesInteractions,
useNodesInteractions,
useWorkflowInteractions,
} from '../../hooks'
+import { BlockEnum } from '../../types'
+import type { StartNodeType } from '../../nodes/start/types'
import ChatWrapper from './chat-wrapper'
import cn from '@/utils/classnames'
-import Button from '@/app/components/base/button'
import { RefreshCcw01 } from '@/app/components/base/icons/src/vender/line/arrows'
+import { BubbleX } from '@/app/components/base/icons/src/vender/line/others'
+import TooltipPlus from '@/app/components/base/tooltip-plus'
+import ActionButton, { ActionButtonState } from '@/app/components/base/action-button'
+import { useStore } from '@/app/components/workflow/store'
export type ChatWrapperRefType = {
handleRestart: () => void
@@ -24,6 +31,13 @@ const DebugAndPreview = () => {
const { handleCancelDebugAndPreviewPanel } = useWorkflowInteractions()
const { handleNodeCancelRunningStatus } = useNodesInteractions()
const { handleEdgeCancelRunningStatus } = useEdgesInteractions()
+ const varList = useStore(s => s.conversationVariables)
+ const [expanded, setExpanded] = useState(true)
+ const nodes = useNodes()
+ const startNode = nodes.find(node => node.data.type === BlockEnum.Start)
+ const variables = startNode?.data.variables || []
+
+ const [showConversationVariableModal, setShowConversationVariableModal] = useState(false)
const handleRestartChat = () => {
handleNodeCancelRunningStatus()
@@ -40,28 +54,43 @@ const DebugAndPreview = () => {
return (
-
- {t('workflow.common.debugAndPreview').toLocaleUpperCase()}
-
-
handleRestartChat()}
+
+
{t('workflow.common.debugAndPreview').toLocaleUpperCase()}
+
+
-
- handleRestartChat()}>
+
+
+
+ {varList.length > 0 && (
+
- {t('common.operation.refresh')}
+ setShowConversationVariableModal(true)}>
+
+
+
+ )}
+ {variables.length > 0 && (
+
+
+ setExpanded(!expanded)}>
+
+
+
+ {expanded &&
}
-
Shift
-
R
-
+ )}
{
-
+ setShowConversationVariableModal(false)}
+ showInputsFieldsPanel={expanded}
+ />
)
diff --git a/web/app/components/workflow/panel/debug-and-preview/user-input.tsx b/web/app/components/workflow/panel/debug-and-preview/user-input.tsx
index 300ea68a4b55cc..d80e3b60059076 100644
--- a/web/app/components/workflow/panel/debug-and-preview/user-input.tsx
+++ b/web/app/components/workflow/panel/debug-and-preview/user-input.tsx
@@ -1,10 +1,7 @@
import {
memo,
- useState,
} from 'react'
-import { useTranslation } from 'react-i18next'
import { useNodes } from 'reactflow'
-import { RiArrowDownSLine } from '@remixicon/react'
import FormItem from '../../nodes/_base/components/before-run-form/form-item'
import { BlockEnum } from '../../types'
import {
@@ -12,11 +9,10 @@ import {
useWorkflowStore,
} from '../../store'
import type { StartNodeType } from '../../nodes/start/types'
+import cn from '@/utils/classnames'
const UserInput = () => {
- const { t } = useTranslation()
const workflowStore = useWorkflowStore()
- const [expanded, setExpanded] = useState(true)
const inputs = useStore(s => s.inputs)
const nodes = useNodes
()
const startNode = nodes.find(node => node.data.type === BlockEnum.Start)
@@ -33,46 +29,21 @@ const UserInput = () => {
return null
return (
-
-
setExpanded(!expanded)}
- >
-
- {t('workflow.panel.userInputField').toLocaleUpperCase()}
-
-
- {
- expanded && (
-
- {
- variables.map((variable, index) => (
-
- handleValueChange(variable.variable, v)}
- />
-
- ))
- }
-
- )
- }
+
+
+ {variables.map((variable, index) => (
+
+ handleValueChange(variable.variable, v)}
+ />
+
+ ))}
)
diff --git a/web/app/components/workflow/panel/env-panel/env-item.tsx b/web/app/components/workflow/panel/env-panel/env-item.tsx
new file mode 100644
index 00000000000000..f7c028389e82d5
--- /dev/null
+++ b/web/app/components/workflow/panel/env-panel/env-item.tsx
@@ -0,0 +1,53 @@
+import { memo, useState } from 'react'
+import { capitalize } from 'lodash-es'
+import { RiDeleteBinLine, RiEditLine, RiLock2Line } from '@remixicon/react'
+import { Env } from '@/app/components/base/icons/src/vender/line/others'
+import { useStore } from '@/app/components/workflow/store'
+import type { EnvironmentVariable } from '@/app/components/workflow/types'
+import cn from '@/utils/classnames'
+
+type EnvItemProps = {
+ env: EnvironmentVariable
+ onEdit: (env: EnvironmentVariable) => void
+ onDelete: (env: EnvironmentVariable) => void
+}
+
+const EnvItem = ({
+ env,
+ onEdit,
+ onDelete,
+}: EnvItemProps) => {
+ const envSecrets = useStore(s => s.envSecrets)
+ const [destructive, setDestructive] = useState(false)
+
+ return (
+
+
+
+
+
{env.name}
+
{capitalize(env.value_type)}
+ {env.value_type === 'secret' &&
}
+
+
+
+ onEdit(env)}/>
+
+
setDestructive(true)}
+ onMouseOut={() => setDestructive(false)}
+ >
+ onDelete(env)} />
+
+
+
+
{env.value_type === 'secret' ? envSecrets[env.id] : env.value}
+
+ )
+}
+
+export default memo(EnvItem)
diff --git a/web/app/components/workflow/panel/env-panel/index.tsx b/web/app/components/workflow/panel/env-panel/index.tsx
index 66a3d0524df6a8..756aa17ac471f0 100644
--- a/web/app/components/workflow/panel/env-panel/index.tsx
+++ b/web/app/components/workflow/panel/env-panel/index.tsx
@@ -3,15 +3,14 @@ import {
useCallback,
useState,
} from 'react'
-import { capitalize } from 'lodash-es'
import {
useStoreApi,
} from 'reactflow'
-import { RiCloseLine, RiDeleteBinLine, RiEditLine, RiLock2Line } from '@remixicon/react'
+import { RiCloseLine } from '@remixicon/react'
import { useTranslation } from 'react-i18next'
import { useStore } from '@/app/components/workflow/store'
-import { Env } from '@/app/components/base/icons/src/vender/line/others'
import VariableTrigger from '@/app/components/workflow/panel/env-panel/variable-trigger'
+import EnvItem from '@/app/components/workflow/panel/env-panel/env-item'
import type {
EnvironmentVariable,
} from '@/app/components/workflow/types'
@@ -61,6 +60,11 @@ const EnvPanel = () => {
setNodes(newNodes)
}, [getEffectedNodes, store])
+ const handleEdit = (env: EnvironmentVariable) => {
+ setCurrentVar(env)
+ setShowVariableModal(true)
+ }
+
const handleDelete = useCallback((env: EnvironmentVariable) => {
removeUsedVarInNodes(env)
updateEnvList(envList.filter(e => e.id !== env.id))
@@ -145,7 +149,7 @@ const EnvPanel = () => {
return (
@@ -171,31 +175,12 @@ const EnvPanel = () => {
{envList.map(env => (
-
-
-
-
-
{env.name}
-
{capitalize(env.value_type)}
- {env.value_type === 'secret' &&
}
-
-
-
- {
- setCurrentVar(env)
- setShowVariableModal(true)
- }}/>
-
-
- deleteCheck(env)} />
-
-
-
-
{env.value_type === 'secret' ? envSecrets[env.id] : env.value}
-
+
))}
{/* type */}
-
{t('workflow.env.modal.type')}
+
{t('workflow.env.modal.type')}
{/* name */}
-
{t('workflow.env.modal.name')}
+
{t('workflow.env.modal.name')}
handleNameChange(e.target.value)}
@@ -125,11 +125,11 @@ const VariableModal = ({
{/* value */}
-
{t('workflow.env.modal.value')}
+
{t('workflow.env.modal.value')}
setValue(e.target.value)}
diff --git a/web/app/components/workflow/panel/env-panel/variable-trigger.tsx b/web/app/components/workflow/panel/env-panel/variable-trigger.tsx
index 95706798e5639e..467f612cdb2562 100644
--- a/web/app/components/workflow/panel/env-panel/variable-trigger.tsx
+++ b/web/app/components/workflow/panel/env-panel/variable-trigger.tsx
@@ -4,7 +4,6 @@ import { useTranslation } from 'react-i18next'
import { RiAddLine } from '@remixicon/react'
import Button from '@/app/components/base/button'
import VariableModal from '@/app/components/workflow/panel/env-panel/variable-modal'
-// import cn from '@/utils/classnames'
import {
PortalToFollowElem,
PortalToFollowElemContent,
diff --git a/web/app/components/workflow/panel/index.tsx b/web/app/components/workflow/panel/index.tsx
index 6cd4d4f7b5f52c..864e24aa80f58c 100644
--- a/web/app/components/workflow/panel/index.tsx
+++ b/web/app/components/workflow/panel/index.tsx
@@ -13,6 +13,7 @@ import DebugAndPreview from './debug-and-preview'
import Record from './record'
import WorkflowPreview from './workflow-preview'
import ChatRecord from './chat-record'
+import ChatVariablePanel from './chat-variable-panel'
import EnvPanel from './env-panel'
import cn from '@/utils/classnames'
import { useStore as useAppStore } from '@/app/components/app/store'
@@ -25,6 +26,7 @@ const Panel: FC = () => {
const historyWorkflowData = useStore(s => s.historyWorkflowData)
const showDebugAndPreviewPanel = useStore(s => s.showDebugAndPreviewPanel)
const showEnvPanel = useStore(s => s.showEnvPanel)
+ const showChatVariablePanel = useStore(s => s.showChatVariablePanel)
const isRestoring = useStore(s => s.isRestoring)
const {
enableShortcuts,
@@ -90,6 +92,11 @@ const Panel: FC = () => {
)
}
+ {
+ showChatVariablePanel && (
+
+ )
+ }
)
}
diff --git a/web/app/components/workflow/run/index.tsx b/web/app/components/workflow/run/index.tsx
index 8b9981346bcc54..702ce06e1cea20 100644
--- a/web/app/components/workflow/run/index.tsx
+++ b/web/app/components/workflow/run/index.tsx
@@ -63,30 +63,25 @@ const RunPanel: FC
= ({ hideResult, activeTab = 'RESULT', runID, getRe
const formatNodeList = useCallback((list: NodeTracing[]) => {
const allItems = list.reverse()
const result: NodeTracing[] = []
- let iterationIndexInfos: {
- start: number
- end: number
- }[] = []
+ let iterationIndex = 0
allItems.forEach((item) => {
- const { node_type, index, execution_metadata } = item
+ const { node_type, execution_metadata } = item
if (node_type !== BlockEnum.Iteration) {
- let isInIteration = false
- let isIterationFirstNode = false
- iterationIndexInfos.forEach(({ start, end }) => {
- if (index >= start && index < end) {
- if (index === start)
- isIterationFirstNode = true
+ const isInIteration = !!execution_metadata?.iteration_id
- isInIteration = true
- }
- })
if (isInIteration) {
const iterationDetails = result[result.length - 1].details!
- if (isIterationFirstNode)
+ const currentIterationIndex = execution_metadata?.iteration_index
+ const isIterationFirstNode = iterationIndex !== currentIterationIndex || iterationDetails.length === 0
+
+ if (isIterationFirstNode) {
iterationDetails!.push([item])
+ iterationIndex = currentIterationIndex!
+ }
- else
+ else {
iterationDetails[iterationDetails.length - 1].push(item)
+ }
return
}
@@ -96,26 +91,6 @@ const RunPanel: FC = ({ hideResult, activeTab = 'RESULT', runID, getRe
return
}
- const { steps_boundary } = execution_metadata
- iterationIndexInfos = []
- steps_boundary.forEach((boundary, index) => {
- if (index === 0) {
- iterationIndexInfos.push({
- start: boundary,
- end: 0,
- })
- }
- else if (index === steps_boundary.length - 1) {
- iterationIndexInfos[iterationIndexInfos.length - 1].end = boundary
- }
- else {
- iterationIndexInfos[iterationIndexInfos.length - 1].end = boundary
- iterationIndexInfos.push({
- start: boundary,
- end: 0,
- })
- }
- })
result.push({
...item,
details: [],
diff --git a/web/app/components/workflow/run/node.tsx b/web/app/components/workflow/run/node.tsx
index f5df961d21e25e..f0f7ec51732a80 100644
--- a/web/app/components/workflow/run/node.tsx
+++ b/web/app/components/workflow/run/node.tsx
@@ -123,7 +123,7 @@ const NodePanel: FC = ({
-
{t('workflow.nodes.iteration.iteration', { count: nodeInfo.metadata?.iterator_length || (nodeInfo.execution_metadata?.steps_boundary?.length - 1) })}
+
{t('workflow.nodes.iteration.iteration', { count: nodeInfo.metadata?.iterator_length })}
{justShowIterationNavArrow
? (
diff --git a/web/app/components/workflow/store.ts b/web/app/components/workflow/store.ts
index 9df22355dd6d38..854684e5c39c05 100644
--- a/web/app/components/workflow/store.ts
+++ b/web/app/components/workflow/store.ts
@@ -11,6 +11,7 @@ import type {
} from './help-line/types'
import type { VariableAssignerNodeType } from './nodes/variable-assigner/types'
import type {
+ ConversationVariable,
Edge,
EnvironmentVariable,
HistoryWorkflowData,
@@ -21,6 +22,24 @@ import type {
} from './types'
import { WorkflowContext } from './context'
+// #TODO chatVar#
+// const MOCK_DATA = [
+// {
+// id: 'fjlaksdjflkjg-dfjlajfl0dnfkafjk-djfdkafj-djfak',
+// name: 'chat_history',
+// value_type: 'array[message]',
+// value: [],
+// description: 'The chat history of the conversation',
+// },
+// {
+// id: 'fljdaklfjl-dfjlafj0-dklajglje-eknglh',
+// name: 'order_id',
+// value: '123456',
+// value_type: 'string',
+// description: '',
+// },
+// ]
+
type PreviewRunningData = WorkflowRunningData & {
resultTabActive?: boolean
resultText?: string
@@ -90,6 +109,10 @@ type Shape = {
setEnvironmentVariables: (environmentVariables: EnvironmentVariable[]) => void
envSecrets: Record
setEnvSecrets: (envSecrets: Record) => void
+ showChatVariablePanel: boolean
+ setShowChatVariablePanel: (showChatVariablePanel: boolean) => void
+ conversationVariables: ConversationVariable[]
+ setConversationVariables: (conversationVariables: ConversationVariable[]) => void
selection: null | { x1: number; y1: number; x2: number; y2: number }
setSelection: (selection: Shape['selection']) => void
bundleNodeSize: { width: number; height: number } | null
@@ -204,6 +227,10 @@ export const createWorkflowStore = () => {
setEnvironmentVariables: environmentVariables => set(() => ({ environmentVariables })),
envSecrets: {},
setEnvSecrets: envSecrets => set(() => ({ envSecrets })),
+ showChatVariablePanel: false,
+ setShowChatVariablePanel: showChatVariablePanel => set(() => ({ showChatVariablePanel })),
+ conversationVariables: [],
+ setConversationVariables: conversationVariables => set(() => ({ conversationVariables })),
selection: null,
setSelection: selection => set(() => ({ selection })),
bundleNodeSize: null,
diff --git a/web/app/components/workflow/types.ts b/web/app/components/workflow/types.ts
index 8b0e3113bc109e..03f78ea21b1646 100644
--- a/web/app/components/workflow/types.ts
+++ b/web/app/components/workflow/types.ts
@@ -8,6 +8,7 @@ import type { ToolDefaultValue } from '@/app/components/workflow/block-selector/
import type { VarType as VarKindType } from '@/app/components/workflow/nodes/tool/types'
import type { NodeTracing } from '@/types/workflow'
import type { Collection, Tool } from '@/app/components/tools/types'
+import type { ChatVarType } from '@/app/components/workflow/panel/chat-variable-panel/type'
export enum BlockEnum {
Start = 'start',
@@ -25,6 +26,7 @@ export enum BlockEnum {
Tool = 'tool',
ParameterExtractor = 'parameter-extractor',
Iteration = 'iteration',
+ Assigner = 'assigner', // is now named as VariableAssigner
}
export type Branch = {
@@ -109,6 +111,14 @@ export type EnvironmentVariable = {
value_type: 'string' | 'number' | 'secret'
}
+export type ConversationVariable = {
+ id: string
+ name: string
+ value_type: ChatVarType
+ value: any
+ description: string
+}
+
export type VariableWithValue = {
key: string
value: string
@@ -132,6 +142,7 @@ export type InputVar = {
nodeType: BlockEnum
nodeName: string
variable: string
+ isChatVar?: boolean
}
variable: string
max_length?: number
@@ -194,6 +205,7 @@ export enum VarType {
boolean = 'boolean',
object = 'object',
array = 'array',
+ file = 'file',
arrayString = 'array[string]',
arrayNumber = 'array[number]',
arrayObject = 'array[object]',
@@ -209,6 +221,7 @@ export type Var = {
isSelect?: boolean
options?: string[]
required?: boolean
+ des?: string
}
export type NodeOutPutVar = {
diff --git a/web/app/styles/globals.css b/web/app/styles/globals.css
index 238166a8107468..f0a8e466d6d04c 100644
--- a/web/app/styles/globals.css
+++ b/web/app/styles/globals.css
@@ -142,6 +142,12 @@ button:focus-within {
line-height: 12px;
}
+.system-2xs-regular {
+ font-size: 10px;
+ font-weight: 400;
+ line-height: 12px;
+}
+
.system-2xs-medium {
font-size: 10px;
font-weight: 500;
diff --git a/web/docker/entrypoint.sh b/web/docker/entrypoint.sh
index 9c30bce74889a7..a19c543d684e1b 100755
--- a/web/docker/entrypoint.sh
+++ b/web/docker/entrypoint.sh
@@ -19,5 +19,6 @@ export NEXT_PUBLIC_PUBLIC_API_PREFIX=${APP_API_URL}/api
export NEXT_PUBLIC_SENTRY_DSN=${SENTRY_DSN}
export NEXT_PUBLIC_SITE_ABOUT=${SITE_ABOUT}
+export NEXT_TELEMETRY_DISABLED=${NEXT_TELEMETRY_DISABLED}
pm2 start ./pm2.json --no-daemon
diff --git a/web/hooks/use-app-favicon.ts b/web/hooks/use-app-favicon.ts
new file mode 100644
index 00000000000000..8904b884ced2f4
--- /dev/null
+++ b/web/hooks/use-app-favicon.ts
@@ -0,0 +1,23 @@
+import { useAsyncEffect } from 'ahooks'
+import { appDefaultIconBackground } from '@/config'
+import { searchEmoji } from '@/utils/emoji'
+
+export function useAppFavicon(enable: boolean, icon?: string, icon_background?: string) {
+ useAsyncEffect(async () => {
+ if (!enable)
+ return
+ const link: HTMLLinkElement = document.querySelector('link[rel*="icon"]') || document.createElement('link')
+
+ // eslint-disable-next-line prefer-template
+ link.href = 'data:image/svg+xml,'
+ + ' '
+ + ''
+ + (icon ? await searchEmoji(icon) : '🤖')
+ + ' '
+ + ' '
+
+ link.rel = 'shortcut icon'
+ link.type = 'image/svg'
+ document.getElementsByTagName('head')[0].appendChild(link)
+ }, [enable, icon, icon_background])
+}
diff --git a/web/i18n/de-DE/app.ts b/web/i18n/de-DE/app.ts
index dc7396702c2306..2101aa82609441 100644
--- a/web/i18n/de-DE/app.ts
+++ b/web/i18n/de-DE/app.ts
@@ -50,6 +50,56 @@ const translation = {
ok: 'OK',
cancel: 'Abbrechen',
},
+ switch: 'Zu Workflow-Orchestrierung wechseln',
+ switchTipStart: 'Eine neue App-Kopie wird für Sie erstellt, und die neue Kopie wird zur Workflow-Orchestrierung wechseln. Die neue Kopie wird ',
+ switchTip: 'nicht erlauben',
+ switchTipEnd: ' zur Basis-Orchestrierung zurückzuwechseln.',
+ switchLabel: 'Die zu erstellende App-Kopie',
+ removeOriginal: 'Ursprüngliche App löschen',
+ switchStart: 'Wechsel starten',
+ typeSelector: {
+ all: 'ALLE Typen',
+ chatbot: 'Chatbot',
+ agent: 'Agent',
+ workflow: 'Workflow',
+ completion: 'Vervollständigung',
+ },
+ tracing: {
+ title: 'Anwendungsleistung nachverfolgen',
+ description: 'Konfiguration eines Drittanbieter-LLMOps-Anbieters und Nachverfolgung der Anwendungsleistung.',
+ config: 'Konfigurieren',
+ collapse: 'Einklappen',
+ expand: 'Ausklappen',
+ tracing: 'Nachverfolgung',
+ disabled: 'Deaktiviert',
+ disabledTip: 'Bitte zuerst den Anbieter konfigurieren',
+ enabled: 'In Betrieb',
+ tracingDescription: 'Erfassung des vollständigen Kontexts der Anwendungsausführung, einschließlich LLM-Aufrufe, Kontext, Prompts, HTTP-Anfragen und mehr, auf einer Nachverfolgungsplattform von Drittanbietern.',
+ configProviderTitle: {
+ configured: 'Konfiguriert',
+ notConfigured: 'Anbieter konfigurieren, um Nachverfolgung zu aktivieren',
+ moreProvider: 'Weitere Anbieter',
+ },
+ langsmith: {
+ title: 'LangSmith',
+ description: 'Eine All-in-One-Entwicklerplattform für jeden Schritt des LLM-gesteuerten Anwendungslebenszyklus.',
+ },
+ langfuse: {
+ title: 'Langfuse',
+ description: 'Traces, Bewertungen, Prompt-Management und Metriken zum Debuggen und Verbessern Ihrer LLM-Anwendung.',
+ },
+ inUse: 'In Verwendung',
+ configProvider: {
+ title: 'Konfigurieren ',
+ placeholder: 'Geben Sie Ihren {{key}} ein',
+ project: 'Projekt',
+ publicKey: 'Öffentlicher Schlüssel',
+ secretKey: 'Geheimer Schlüssel',
+ viewDocsLink: '{{key}}-Dokumentation ansehen',
+ removeConfirmTitle: '{{key}}-Konfiguration entfernen?',
+ removeConfirmContent: 'Die aktuelle Konfiguration wird verwendet. Das Entfernen wird die Nachverfolgungsfunktion ausschalten.',
+ },
+ },
}
export default translation
diff --git a/web/i18n/de-DE/common.ts b/web/i18n/de-DE/common.ts
index bd0ef66e557702..a7c7bb58fd3d03 100644
--- a/web/i18n/de-DE/common.ts
+++ b/web/i18n/de-DE/common.ts
@@ -12,6 +12,7 @@ const translation = {
cancel: 'Abbrechen',
clear: 'Leeren',
save: 'Speichern',
+ saveAndEnable: 'Speichern und Aktivieren',
edit: 'Bearbeiten',
add: 'Hinzufügen',
added: 'Hinzugefügt',
diff --git a/web/i18n/de-DE/workflow.ts b/web/i18n/de-DE/workflow.ts
index 038ff1e675c037..fe72d68c3e9ce6 100644
--- a/web/i18n/de-DE/workflow.ts
+++ b/web/i18n/de-DE/workflow.ts
@@ -19,7 +19,7 @@ const translation = {
goBackToEdit: 'Zurück zum Editor',
conversationLog: 'Konversationsprotokoll',
features: 'Funktionen',
- debugAndPreview: 'Debuggen und Vorschau',
+ debugAndPreview: 'Vorschau',
restart: 'Neustarten',
currentDraft: 'Aktueller Entwurf',
currentDraftUnpublished: 'Aktueller Entwurf unveröffentlicht',
@@ -70,6 +70,27 @@ const translation = {
workflowAsToolTip: 'Nach dem Workflow-Update ist eine Neukonfiguration des Tools erforderlich.',
viewDetailInTracingPanel: 'Details anzeigen',
},
+ env: {
+ envPanelTitle: 'Umgebungsvariablen',
+ envDescription: 'Umgebungsvariablen können zur Speicherung privater Informationen und Anmeldedaten verwendet werden. Sie sind schreibgeschützt und können beim Export von der DSL-Datei getrennt werden.',
+ envPanelButton: 'Variable hinzufügen',
+ modal: {
+ title: 'Umgebungsvariable hinzufügen',
+ editTitle: 'Umgebungsvariable bearbeiten',
+ type: 'Typ',
+ name: 'Name',
+ namePlaceholder: 'Umgebungsname',
+ value: 'Wert',
+ valuePlaceholder: 'Umgebungswert',
+ secretTip: 'Wird verwendet, um sensible Informationen oder Daten zu definieren, wobei DSL-Einstellungen zur Verhinderung von Lecks konfiguriert sind.',
+ },
+ export: {
+ title: 'Geheime Umgebungsvariablen exportieren?',
+ checkbox: 'Geheime Werte exportieren',
+ ignore: 'DSL exportieren',
+ export: 'DSL mit geheimen Werten exportieren',
+ },
+ },
changeHistory: {
title: 'Änderungsverlauf',
placeholder: 'Du hast noch nichts geändert',
diff --git a/web/i18n/en-US/workflow.ts b/web/i18n/en-US/workflow.ts
index 233ba40450b034..78bc58e4b9275e 100644
--- a/web/i18n/en-US/workflow.ts
+++ b/web/i18n/en-US/workflow.ts
@@ -99,6 +99,33 @@ const translation = {
export: 'Export DSL with secret values ',
},
},
+ chatVariable: {
+ panelTitle: 'Conversation Variables',
+ panelDescription: 'Conversation Variables are used to store interactive information that LLM needs to remember, including conversation history, uploaded files, user preferences. They are read-write. ',
+ docLink: 'Visit our docs to learn more.',
+ button: 'Add Variable',
+ modal: {
+ title: 'Add Conversation Variable',
+ editTitle: 'Edit Conversation Variable',
+ name: 'Name',
+ namePlaceholder: 'Variable name',
+ type: 'Type',
+ value: 'Default Value',
+ valuePlaceholder: 'Default value, leave blank to not set',
+ description: 'Description',
+ descriptionPlaceholder: 'Describe the variable',
+ editInJSON: 'Edit in JSON',
+ oneByOne: 'Add one by one',
+ editInForm: 'Edit in Form',
+ arrayValue: 'Value',
+ addArrayValue: 'Add Value',
+ objectKey: 'Key',
+ objectType: 'Type',
+ objectValue: 'Default Value',
+ },
+ storedContent: 'Stored content',
+ updatedAt: 'Updated at ',
+ },
changeHistory: {
title: 'Change History',
placeholder: 'You haven\'t changed anything yet',
@@ -149,6 +176,7 @@ const translation = {
tabs: {
'searchBlock': 'Search block',
'blocks': 'Blocks',
+ 'searchTool': 'Search tool',
'tools': 'Tools',
'allTool': 'All',
'builtInTool': 'Built-in',
@@ -173,6 +201,7 @@ const translation = {
'http-request': 'HTTP Request',
'variable-assigner': 'Variable Aggregator',
'variable-aggregator': 'Variable Aggregator',
+ 'assigner': 'Variable Assigner',
'iteration-start': 'Iteration Start',
'iteration': 'Iteration',
'parameter-extractor': 'Parameter Extractor',
@@ -189,6 +218,7 @@ const translation = {
'template-transform': 'Convert data to string using Jinja template syntax',
'http-request': 'Allow server requests to be sent over the HTTP protocol',
'variable-assigner': 'Aggregate multi-branch variables into a single variable for unified configuration of downstream nodes.',
+ 'assigner': 'The variable assignment node is used for assigning values to writable variables (like conversation variables).',
'variable-aggregator': 'Aggregate multi-branch variables into a single variable for unified configuration of downstream nodes.',
'iteration': 'Perform multiple steps on a list object until all results are outputted.',
'parameter-extractor': 'Use LLM to extract structured parameters from natural language for tool invocations or HTTP requests.',
@@ -215,6 +245,7 @@ const translation = {
checklistResolved: 'All issues are resolved',
organizeBlocks: 'Organize blocks',
change: 'Change',
+ optional: '(optional)',
},
nodes: {
common: {
@@ -406,6 +437,17 @@ const translation = {
},
setAssignVariable: 'Set assign variable',
},
+ assigner: {
+ 'assignedVariable': 'Assigned Variable',
+ 'writeMode': 'Write Mode',
+ 'writeModeTip': 'When ASSIGNED VARIABLE is an array, append mode adds to the end.',
+ 'over-write': 'Overwrite',
+ 'append': 'Append',
+ 'plus': 'Plus',
+ 'clear': 'Clear',
+ 'setVariable': 'Set Variable',
+ 'variable': 'Variable',
+ },
tool: {
toAuthorize: 'To authorize',
inputVars: 'Input Variables',
diff --git a/web/i18n/es-ES/workflow.ts b/web/i18n/es-ES/workflow.ts
index 5db18939fe3121..fbdeff3a6fb86a 100644
--- a/web/i18n/es-ES/workflow.ts
+++ b/web/i18n/es-ES/workflow.ts
@@ -19,7 +19,7 @@ const translation = {
goBackToEdit: 'Volver al editor',
conversationLog: 'Registro de conversación',
features: 'Funcionalidades',
- debugAndPreview: 'Depurar y previsualizar',
+ debugAndPreview: 'Vista previa',
restart: 'Reiniciar',
currentDraft: 'Borrador actual',
currentDraftUnpublished: 'Borrador actual no publicado',
@@ -78,6 +78,27 @@ const translation = {
importFailure: 'Error al importar',
importSuccess: 'Importación exitosa',
},
+ env: {
+ envPanelTitle: 'Variables de Entorno',
+ envDescription: 'Las variables de entorno se pueden utilizar para almacenar información privada y credenciales. Son de solo lectura y se pueden separar del archivo DSL durante la exportación.',
+ envPanelButton: 'Añadir Variable',
+ modal: {
+ title: 'Añadir Variable de Entorno',
+ editTitle: 'Editar Variable de Entorno',
+ type: 'Tipo',
+ name: 'Nombre',
+ namePlaceholder: 'nombre de env',
+ value: 'Valor',
+ valuePlaceholder: 'valor de env',
+ secretTip: 'Se utiliza para definir información o datos sensibles, con ajustes de DSL configurados para prevenir fugas.',
+ },
+ export: {
+ title: '¿Exportar variables de entorno secretas?',
+ checkbox: 'Exportar valores secretos',
+ ignore: 'Exportar DSL',
+ export: 'Exportar DSL con valores secretos',
+ },
+ },
changeHistory: {
title: 'Historial de cambios',
placeholder: 'Aún no has realizado cambios',
diff --git a/web/i18n/fa-IR/app-annotation.ts b/web/i18n/fa-IR/app-annotation.ts
new file mode 100644
index 00000000000000..e78fc8cd7e4976
--- /dev/null
+++ b/web/i18n/fa-IR/app-annotation.ts
@@ -0,0 +1,87 @@
+const translation = {
+ title: 'یادداشتها',
+ name: 'پاسخ یادداشت',
+ editBy: 'پاسخ ویرایش شده توسط {{author}}',
+ noData: {
+ title: 'بدون یادداشت',
+ description: 'شما میتوانید یادداشتها را در حین اشکالزدایی برنامه ویرایش کنید یا یادداشتها را به صورت انبوه در اینجا برای پاسخگویی با کیفیت بالا وارد کنید.',
+ },
+ table: {
+ header: {
+ question: 'سوال',
+ answer: 'پاسخ',
+ createdAt: 'ایجاد شده در',
+ hits: 'بازدیدها',
+ actions: 'اقدامات',
+ addAnnotation: 'افزودن یادداشت',
+ bulkImport: 'واردات انبوه',
+ bulkExport: 'صادرات انبوه',
+ clearAll: 'پاک کردن همه یادداشتها',
+ },
+ },
+ editModal: {
+ title: 'ویرایش پاسخ یادداشت',
+ queryName: 'پرسش کاربر',
+ answerName: 'ربات داستانسرا',
+ yourAnswer: 'پاسخ شما',
+ answerPlaceholder: 'پاسخ خود را اینجا بنویسید',
+ yourQuery: 'پرسش شما',
+ queryPlaceholder: 'پرسش خود را اینجا بنویسید',
+ removeThisCache: 'حذف این یادداشت',
+ createdAt: 'ایجاد شده در',
+ },
+ addModal: {
+ title: 'افزودن پاسخ یادداشت',
+ queryName: 'سوال',
+ answerName: 'پاسخ',
+ answerPlaceholder: 'پاسخ را اینجا بنویسید',
+ queryPlaceholder: 'پرسش را اینجا بنویسید',
+ createNext: 'افزودن پاسخ یادداشتشده دیگر',
+ },
+ batchModal: {
+ title: 'واردات انبوه',
+ csvUploadTitle: 'فایل CSV خود را اینجا بکشید و رها کنید، یا ',
+ browse: 'مرور کنید',
+ tip: 'فایل CSV باید از ساختار زیر پیروی کند:',
+ question: 'سوال',
+ answer: 'پاسخ',
+ contentTitle: 'محتوای تکه',
+ content: 'محتوا',
+ template: 'الگو را از اینجا دانلود کنید',
+ cancel: 'لغو',
+ run: 'اجرای دستهای',
+ runError: 'اجرای دستهای ناموفق بود',
+ processing: 'در حال پردازش دستهای',
+ completed: 'واردات تکمیل شد',
+ error: 'خطای واردات',
+ ok: 'تایید',
+ },
+ errorMessage: {
+ answerRequired: 'پاسخ الزامی است',
+ queryRequired: 'سوال الزامی است',
+ },
+ viewModal: {
+ annotatedResponse: 'پاسخ یادداشتشده',
+ hitHistory: 'تاریخچه بازدید',
+ hit: 'بازدید',
+ hits: 'بازدیدها',
+ noHitHistory: 'بدون تاریخچه بازدید',
+ },
+ hitHistoryTable: {
+ query: 'پرسش',
+ match: 'تطابق',
+ response: 'پاسخ',
+ source: 'منبع',
+ score: 'امتیاز',
+ time: 'زمان',
+ },
+ initSetup: {
+ title: 'راهاندازی اولیه پاسخ یادداشت',
+ configTitle: 'تنظیمات پاسخ یادداشت',
+ confirmBtn: 'ذخیره و فعالسازی',
+ configConfirmBtn: 'ذخیره',
+ },
+ embeddingModelSwitchTip: 'مدل برداریسازی متن یادداشت، تغییر مدلها باعث جاسازی مجدد خواهد شد و هزینههای اضافی به همراه خواهد داشت.',
+}
+
+export default translation
diff --git a/web/i18n/fa-IR/app-api.ts b/web/i18n/fa-IR/app-api.ts
new file mode 100644
index 00000000000000..0548ef2a2bb7ac
--- /dev/null
+++ b/web/i18n/fa-IR/app-api.ts
@@ -0,0 +1,83 @@
+const translation = {
+ apiServer: 'سرور API',
+ apiKey: 'کلید API',
+ status: 'وضعیت',
+ disabled: 'غیرفعال',
+ ok: 'در سرویس',
+ copy: 'کپی',
+ copied: 'کپی شد',
+ play: 'پخش',
+ pause: 'مکث',
+ playing: 'در حال پخش',
+ loading: 'در حال بارگذاری',
+ merMaind: {
+ rerender: 'بازسازی مجدد',
+ },
+ never: 'هرگز',
+ apiKeyModal: {
+ apiSecretKey: 'کلید مخفی API',
+ apiSecretKeyTips: 'برای جلوگیری از سوء استفاده از API، از کلید API خود محافظت کنید. از استفاده از آن به صورت متن ساده در کد فرانتاند خودداری کنید. :)',
+ createNewSecretKey: 'ایجاد کلید مخفی جدید',
+ secretKey: 'کلید مخفی',
+ created: 'ایجاد شده',
+ lastUsed: 'آخرین استفاده',
+ generateTips: 'این کلید را در مکانی امن و قابل دسترس نگه دارید.',
+ },
+ actionMsg: {
+ deleteConfirmTitle: 'این کلید مخفی حذف شود؟',
+ deleteConfirmTips: 'این عمل قابل بازگشت نیست.',
+ ok: 'تایید',
+ },
+ completionMode: {
+ title: 'API برنامه تکمیل',
+ info: 'برای تولید متن با کیفیت بالا، مانند مقالات، خلاصهها و ترجمهها، از API پیامهای تکمیلی با ورودی کاربر استفاده کنید. تولید متن به پارامترهای مدل و قالبهای پرامپت تنظیم شده در مهندسی پرامپت Dify بستگی دارد.',
+ createCompletionApi: 'ایجاد پیام تکمیلی',
+ createCompletionApiTip: 'یک پیام تکمیلی برای پشتیبانی از حالت سوال و جواب ایجاد کنید.',
+ inputsTips: '(اختیاری) فیلدهای ورودی کاربر را به صورت جفتهای کلید-مقدار ارائه دهید که با متغیرهای موجود در مهندسی پرامپت مطابقت دارند. کلید نام متغیر است و مقدار، مقدار پارامتر است. اگر نوع فیلد انتخابی باشد، مقدار ارسال شده باید یکی از گزینههای از پیش تعیین شده باشد.',
+ queryTips: 'محتوای متن ورودی کاربر.',
+ blocking: 'نوع مسدودکننده، منتظر اتمام اجرا و بازگشت نتایج. (درخواستها ممکن است در صورت طولانی بودن فرآیند قطع شوند)',
+ streaming: 'بازگشت جریانی. پیادهسازی بازگشت جریانی بر اساس SSE (رویدادهای ارسالی سرور).',
+ messageFeedbackApi: 'بازخورد پیام (لایک)',
+ messageFeedbackApiTip: 'پیامهای دریافتی را از طرف کاربران نهایی با لایک یا دیسلایک ارزیابی کنید. این دادهها در صفحه گزارشها و یادداشتها قابل مشاهده هستند و برای تنظیم دقیق مدل در آینده استفاده میشوند.',
+ messageIDTip: 'شناسه پیام',
+ ratingTip: 'لایک یا دیسلایک، null برای لغو',
+ parametersApi: 'دریافت اطلاعات پارامترهای برنامه',
+ parametersApiTip: 'بازیابی پارامترهای ورودی پیکربندی شده، شامل نامهای متغیر، نامهای فیلد، انواع و مقادیر پیشفرض. معمولاً برای نمایش این فیلدها در یک فرم یا پر کردن مقادیر پیشفرض پس از بارگیری کلاینت استفاده میشود.',
+ },
+ chatMode: {
+ title: 'API برنامه چت',
+ info: 'برای برنامههای مکالمهای چندمنظوره با استفاده از فرمت سوال و جواب، API پیامهای چت را برای شروع گفتگو فراخوانی کنید. با ارسال شناسه مکالمه بازگشتی، گفتگوهای مداوم را حفظ کنید. پارامترهای پاسخ و قالبها به تنظیمات مهندسی پرامپت Dify بستگی دارند.',
+ createChatApi: 'ایجاد پیام چت',
+ createChatApiTip: 'یک پیام مکالمه جدید ایجاد کنید یا یک گفتگوی موجود را ادامه دهید.',
+ inputsTips: '(اختیاری) فیلدهای ورودی کاربر را به صورت جفتهای کلید-مقدار ارائه دهید که با متغیرهای موجود در مهندسی پرامپت مطابقت دارند. کلید نام متغیر است و مقدار، مقدار پارامتر است. اگر نوع فیلد انتخابی باشد، مقدار ارسال شده باید یکی از گزینههای از پیش تعیین شده باشد.',
+ queryTips: 'محتوای ورودی/سوال کاربر',
+ blocking: 'نوع مسدودکننده، منتظر اتمام اجرا و بازگشت نتایج. (درخواستها ممکن است در صورت طولانی بودن فرآیند قطع شوند)',
+ streaming: 'بازگشت جریانی. پیادهسازی بازگشت جریانی بر اساس SSE (رویدادهای ارسالی سرور).',
+ conversationIdTip: '(اختیاری) شناسه مکالمه: برای اولین مکالمه خالی بگذارید؛ برای ادامه گفتگو، شناسه مکالمه را از متن ارسال کنید.',
+ messageFeedbackApi: 'بازخورد کاربر نهایی پیام، لایک',
+ messageFeedbackApiTip: 'پیامهای دریافتی را از طرف کاربران نهایی با لایک یا دیسلایک ارزیابی کنید. این دادهها در صفحه گزارشها و یادداشتها قابل مشاهده هستند و برای تنظیم دقیق مدل در آینده استفاده میشوند.',
+ messageIDTip: 'شناسه پیام',
+ ratingTip: 'لایک یا دیسلایک، null برای لغو',
+ chatMsgHistoryApi: 'دریافت تاریخچه پیامهای چت',
+ chatMsgHistoryApiTip: 'صفحه اول آخرین `limit` پیام را به صورت معکوس برمیگرداند.',
+ chatMsgHistoryConversationIdTip: 'شناسه مکالمه',
+ chatMsgHistoryFirstId: 'شناسه اولین رکورد چت در صفحه فعلی. پیشفرض هیچ است.',
+ chatMsgHistoryLimit: 'تعداد چتهایی که در یک درخواست برگردانده میشوند',
+ conversationsListApi: 'دریافت لیست مکالمات',
+ conversationsListApiTip: 'لیست جلسات کاربر فعلی را دریافت میکند. به طور پیشفرض، 20 جلسه آخر برگردانده میشود.',
+ conversationsListFirstIdTip: 'شناسه آخرین رکورد در صفحه فعلی، پیشفرض هیچ.',
+ conversationsListLimitTip: 'تعداد چتهایی که در یک درخواست برگردانده میشوند',
+ conversationRenamingApi: 'تغییر نام مکالمه',
+ conversationRenamingApiTip: 'تغییر نام مکالمات؛ نام در رابطهای کاربری چند جلسهای نمایش داده میشود.',
+ conversationRenamingNameTip: 'نام جدید',
+ parametersApi: 'دریافت اطلاعات پارامترهای برنامه',
+ parametersApiTip: 'بازیابی پارامترهای ورودی پیکربندی شده، شامل نامهای متغیر، نامهای فیلد، انواع و مقادیر پیشفرض. معمولاً برای نمایش این فیلدها در یک فرم یا پر کردن مقادیر پیشفرض پس از بارگیری کلاینت استفاده میشود.',
+ },
+ develop: {
+ requestBody: 'بدنه درخواست',
+ pathParams: 'پارامترهای مسیر',
+ query: 'پرسوجو',
+ },
+}
+
+export default translation
diff --git a/web/i18n/fa-IR/app-debug.ts b/web/i18n/fa-IR/app-debug.ts
new file mode 100644
index 00000000000000..863f47bb18a6f0
--- /dev/null
+++ b/web/i18n/fa-IR/app-debug.ts
@@ -0,0 +1,455 @@
+const translation = {
+ pageTitle: {
+ line1: 'پرومپت',
+ line2: 'مهندسی',
+ },
+ orchestrate: 'هماهنگ کردن',
+ promptMode: {
+ simple: 'برای ویرایش کل پرومپت به حالت کارشناس بروید',
+ advanced: 'حالت کارشناس',
+ switchBack: 'بازگشت',
+ advancedWarning: {
+ title: 'شما به حالت کارشناس رفتهاید، و پس از تغییر پرومپت، نمیتوانید به حالت ساده برگردید.',
+ description: 'در حالت کارشناس، میتوانید کل پرومپت را ویرایش کنید.',
+ learnMore: 'بیشتر بدانید',
+ ok: 'باشه',
+ },
+ operation: {
+ addMessage: 'اضافه کردن پیام',
+ },
+ contextMissing: 'مولفه زمینهای از دست رفته است، اثر بخشی پرومپت ممکن است خوب نباشد.',
+ },
+ operation: {
+ applyConfig: 'انتشار',
+ resetConfig: 'تنظیم مجدد',
+ debugConfig: 'دیباگ',
+ addFeature: 'اضافه کردن ویژگی',
+ automatic: 'تولید کردن',
+ stopResponding: 'توقف پاسخدهی',
+ agree: 'پسندیدن',
+ disagree: 'نپسندیدن',
+ cancelAgree: 'لغو پسندیدن',
+ cancelDisagree: 'لغو نپسندیدن',
+ userAction: 'عمل کاربر',
+ },
+ notSetAPIKey: {
+ title: 'کلید ارائهدهنده LLM تنظیم نشده است',
+ trailFinished: 'آزمایش تمام شد',
+ description: 'کلید ارائهدهنده LLM تنظیم نشده است و باید قبل از دیباگ تنظیم شود.',
+ settingBtn: 'به تنظیمات بروید',
+ },
+ trailUseGPT4Info: {
+ title: 'در حال حاضر پشتیبانی نمیشود gpt-4',
+ description: 'برای استفاده از gpt-4، لطفاً کلید API را تنظیم کنید.',
+ },
+ feature: {
+ groupChat: {
+ title: 'تقویت گفتگو',
+ description: 'افزودن تنظیمات پیش از گفتگو برای برنامهها میتواند تجربه کاربری را بهبود بخشد.',
+ },
+ groupExperience: {
+ title: 'تقویت تجربه',
+ },
+ conversationOpener: {
+ title: 'شروعکننده گفتگو',
+ description: 'در یک برنامه چت، اولین جملهای که AI فعالانه با کاربر صحبت میکند، معمولاً به عنوان خوشامدگویی استفاده میشود.',
+ },
+ suggestedQuestionsAfterAnswer: {
+ title: 'پیگیری',
+ description: 'تنظیم پیشنهاد سوالات بعدی میتواند به کاربران یک چت بهتر ارائه دهد.',
+ resDes: '3 پیشنهاد برای سوال بعدی کاربر.',
+ tryToAsk: 'سعی کنید بپرسید',
+ },
+ moreLikeThis: {
+ title: 'بیشتر از این',
+ description: 'تولید چندین متن به طور همزمان، و سپس ویرایش و ادامه تولید',
+ generateNumTip: 'تعداد تولید هر بار',
+ tip: 'استفاده از این ویژگی هزینههای اضافی توکنها را به همراه دارد',
+ },
+ speechToText: {
+ title: 'تبدیل گفتار به متن',
+ description: 'پس از فعال شدن، میتوانید از ورودی صوتی استفاده کنید.',
+ resDes: 'ورودی صوتی فعال شده است',
+ },
+ textToSpeech: {
+ title: 'تبدیل متن به گفتار',
+ description: 'پس از فعال شدن، متن میتواند به گفتار تبدیل شود.',
+ resDes: 'تبدیل متن به صدا فعال شده است',
+ },
+ citation: {
+ title: 'ارجاعات و استنادات',
+ description: 'پس از فعال شدن، سند منبع و بخش استناد شده از محتوای تولید شده را نشان میدهد.',
+ resDes: 'ارجاعات و استنادات فعال شده است',
+ },
+ annotation: {
+ title: 'پاسخ حاشیهنویسی',
+ description: 'میتوانید پاسخهای با کیفیت بالا را به صورت دستی به حافظه کش اضافه کنید تا با سوالات مشابه کاربران تطبیق یابد.',
+ resDes: 'پاسخ حاشیهنویسی فعال شده است',
+ scoreThreshold: {
+ title: 'آستانه امتیاز',
+ description: 'برای تعیین آستانه شباهت پاسخ یادداشت استفاده میشود.',
+ easyMatch: 'تطابق آسان',
+ accurateMatch: 'تطابق دقیق',
+ },
+ matchVariable: {
+ title: 'تغییر متغیر',
+ choosePlaceholder: 'انتخاب متغیر تغییر',
+ },
+ cacheManagement: 'حاشیه نویسی',
+ cached: 'حاشیه نویسی شده',
+ remove: 'حذف',
+ removeConfirm: 'این حاشیه نویسی را حذف کنید؟',
+ add: 'افزودن حاشیه نویسی',
+ edit: 'ویرایش حاشیه نویسی',
+ },
+ dataSet: {
+ title: 'زمینه',
+ noData: 'شما میتوانید دانش را به عنوان زمینه وارد کنید',
+ words: 'کلمات',
+ textBlocks: 'بلوکهای متن',
+ selectTitle: 'انتخاب دانش مرجع',
+ selected: 'دانش انتخاب شده',
+ noDataSet: 'هیچ دانشی یافت نشد',
+ toCreate: 'برای ایجاد بروید',
+ notSupportSelectMulti: 'در حال حاضر فقط یک دانش پشتیبانی میشود',
+ queryVariable: {
+ title: 'متغیر پرس و جو',
+ tip: 'این متغیر به عنوان ورودی پرس و جو برای بازیابی زمینه استفاده خواهد شد و اطلاعات زمینه مرتبط با ورودی این متغیر را به دست میآورد.',
+ choosePlaceholder: 'انتخاب متغیر پرس و جو',
+ noVar: 'بدون متغیر',
+ noVarTip: 'لطفاً متغیری را در بخش متغیرها ایجاد کنید',
+ unableToQueryDataSet: 'عدم امکان پرس و جو از دانش',
+ unableToQueryDataSetTip: 'پرس و جوی موفقیت آمیز دانش ممکن نیست، لطفاً یک متغیر پرس و جو زمینه را در بخش زمینه انتخاب کنید.',
+ ok: 'باشه',
+ contextVarNotEmpty: 'متغیر پرس و جو زمینه نمیتواند خالی باشد',
+ deleteContextVarTitle: 'متغیر "{{varName}}" را حذف کنید؟',
+ deleteContextVarTip: 'این متغیر به عنوان متغیر پرس و جو زمینه تنظیم شده است و حذف آن بر استفاده عادی از دانش تأثیر میگذارد. اگر هنوز نیاز به حذف دارید، لطفاً آن را در بخش زمینه دوباره انتخاب کنید.',
+ },
+ },
+ tools: {
+ title: 'ابزارها',
+ tips: 'ابزارها یک روش استاندارد برای فراخوانی API فراهم میکنند و ورودی کاربر یا متغیرها را به عنوان پارامترهای درخواست برای پرس و جو دادههای خارجی به عنوان زمینه میگیرند.',
+ toolsInUse: '{{count}} ابزار در حال استفاده',
+ modal: {
+ title: 'ابزار',
+ toolType: {
+ title: 'نوع ابزار',
+ placeholder: 'لطفاً نوع ابزار را انتخاب کنید',
+ },
+ name: {
+ title: 'نام',
+ placeholder: 'لطفاً نام را وارد کنید',
+ },
+ variableName: {
+ title: 'نام متغیر',
+ placeholder: 'لطفاً نام متغیر را وارد کنید',
+ },
+ },
+ },
+ conversationHistory: {
+ title: 'تاریخچه مکالمه',
+ description: 'تنظیم پیشوند نامها برای نقشهای مکالمه',
+ tip: 'تاریخچه مکالمه فعال نشده است، لطفاً <histories> را در فراخوانی بالا اضافه کنید.',
+ learnMore: 'بیشتر بدانید',
+ editModal: {
+ title: 'ویرایش نام نقشهای مکالمه',
+ userPrefix: 'پیشوند کاربر',
+ assistantPrefix: 'پیشوند دستیار',
+ },
+ },
+ toolbox: {
+ title: 'جعبه ابزار',
+ },
+ moderation: {
+ title: 'مدیریت محتوا',
+ description: 'خروجی مدل را با استفاده از API مدیریت یا نگهداری فهرست کلمات حساس امن کنید.',
+ allEnabled: 'محتوای ورودی/خروجی فعال شده',
+ inputEnabled: 'محتوای ورودی فعال شده',
+ outputEnabled: 'محتوای خروجی فعال شده',
+ modal: {
+ title: 'تنظیمات مدیریت محتوا',
+ provider: {
+ title: 'ارائه دهنده',
+ openai: 'مدیریت OpenAI',
+ openaiTip: {
+ prefix: 'مدیریت OpenAI نیاز به کلید API OpenAI دارد که در ',
+ suffix: ' تنظیم شده باشد.',
+ },
+ keywords: 'کلمات کلیدی',
+ },
+ keywords: {
+ tip: 'هر خط یک کلمه، با شکست خطوط جدا شده. حداکثر 100 کاراکتر در هر خط.',
+ placeholder: 'هر خط یک کلمه، با شکست خطوط جدا شده',
+ line: 'خط',
+ },
+ content: {
+ input: 'مدیریت محتوای ورودی',
+ output: 'مدیریت محتوای خروجی',
+ preset: 'پاسخهای پیش فرض',
+ placeholder: 'محتوای پاسخهای پیش فرض در اینجا',
+ condition: 'مدیریت محتوای ورودی و خروجی حداقل یک مورد فعال شده است',
+ fromApi: 'پاسخهای پیش فرض از API برگردانده میشود',
+ errorMessage: 'پاسخهای پیش فرض نمیتواند خالی باشد',
+ supportMarkdown: 'پشتیبانی از Markdown',
+ },
+ openaiNotConfig: {
+ before: 'مدیریت OpenAI نیاز به کلید API OpenAI دارد که در',
+ after: '',
+ },
+ },
+ },
+ generate: {
+ title: 'تولید کننده دستورالعمل',
+ description: 'تولید کننده دستورالعمل از مدل تنظیم شده برای بهینه سازی دستورالعملها برای کیفیت بالاتر و ساختار بهتر استفاده میکند. لطفاً دستورالعملهای واضح و دقیقی بنویسید.',
+ tryIt: 'امتحان کنید',
+ instruction: 'دستورالعملها',
+ instructionPlaceHolder: 'دستورالعملهای واضح و خاصی بنویسید.',
+ generate: 'تولید',
+ resTitle: 'دستورالعمل تولید شده',
+ noDataLine1: 'موارد استفاده خود را در سمت چپ توصیف کنید،',
+ noDataLine2: 'پیشنمایش ارکستراسیون در اینجا نشان داده خواهد شد.',
+ apply: 'اعمال',
+ loading: 'در حال ارکستراسیون برنامه برای شما...',
+ overwriteTitle: 'آیا تنظیمات موجود را لغو میکنید؟',
+ overwriteMessage: 'اعمال این دستورالعمل تنظیمات موجود را لغو خواهد کرد.',
+ template: {
+ pythonDebugger: {
+ name: 'اشکالزدای پایتون',
+ instruction: 'یک بات که میتواند بر اساس دستورالعمل شما کد تولید و اشکالزدایی کند',
+ },
+ translation: {
+ name: 'ترجمه',
+ instruction: 'یک مترجم که میتواند چندین زبان را ترجمه کند',
+ },
+ professionalAnalyst: {
+ name: 'تحلیلگر حرفهای',
+ instruction: 'استخراج بینشها، شناسایی ریسک و خلاصهسازی اطلاعات کلیدی از گزارشهای طولانی به یک یادداشت کوتاه',
+ },
+ excelFormulaExpert: {
+ name: 'کارشناس فرمول اکسل',
+ instruction: 'یک چتبات که میتواند به کاربران مبتدی کمک کند فرمولهای اکسل را بر اساس دستورالعملهای کاربر درک، استفاده و ایجاد کنند',
+ },
+ travelPlanning: {
+ name: 'برنامهریزی سفر',
+ instruction: 'دستیار برنامهریزی سفر یک ابزار هوشمند است که به کاربران کمک میکند سفرهای خود را به راحتی برنامهریزی کنند',
+ },
+ SQLSorcerer: {
+ name: 'جادوگر SQL',
+ instruction: 'تبدیل زبان روزمره به پرس و جوهای SQL',
+ },
+ GitGud: {
+ name: 'Git gud',
+ instruction: 'تولید دستورات مناسب Git بر اساس اقدامات توصیف شده توسط کاربر در کنترل نسخه',
+ },
+ meetingTakeaways: {
+ name: 'نتایج جلسات',
+ instruction: 'خلاصهسازی جلسات به صورت مختصر شامل موضوعات بحث، نکات کلیدی و موارد اقدام',
+ },
+ writingsPolisher: {
+ name: 'پولیشگر نوشتهها',
+ instruction: 'استفاده از تکنیکهای ویرایش پیشرفته برای بهبود نوشتههای شما',
+ },
+ },
+ },
+ resetConfig: {
+ title: 'بازنشانی تأیید میشود؟',
+ message: 'بازنشانی تغییرات را لغو کرده و تنظیمات منتشر شده آخر را بازیابی میکند.',
+ },
+ errorMessage: {
+ nameOfKeyRequired: 'نام کلید: {{key}} مورد نیاز است',
+ valueOfVarRequired: 'مقدار {{key}} نمیتواند خالی باشد',
+ queryRequired: 'متن درخواست مورد نیاز است.',
+ waitForResponse: 'لطفاً منتظر پاسخ به پیام قبلی بمانید.',
+ waitForBatchResponse: 'لطفاً منتظر پاسخ به کار دستهای بمانید.',
+ notSelectModel: 'لطفاً یک مدل را انتخاب کنید',
+ waitForImgUpload: 'لطفاً منتظر بارگذاری تصویر بمانید',
+ },
+ chatSubTitle: 'دستورالعملها',
+ completionSubTitle: 'پیشوند پرس و جو',
+ promptTip: 'دستورالعملها و محدودیتها پاسخهای AI را هدایت میکنند. متغیرهایی مانند {{input}} را درج کنید. این دستورالعمل برای کاربران قابل مشاهده نخواهد بود.',
+ formattingChangedTitle: 'قالببندی تغییر کرد',
+ formattingChangedText: 'تغییر قالببندی منطقه اشکالزدایی را بازنشانی خواهد کرد، آیا مطمئن هستید؟',
+ variableTitle: 'متغیرها',
+ variableTip: 'کاربران متغیرها را در فرم پر میکنند و به طور خودکار متغیرها را در دستورالعملها جایگزین میکنند.',
+ notSetVar: 'متغیرها به کاربران اجازه میدهند که کلمات پرس و جو یا جملات ابتدایی را هنگام پر کردن فرم معرفی کنند. شما میتوانید سعی کنید "{{input}}" را در کلمات پرس و جو وارد کنید.',
+ autoAddVar: 'متغیرهای تعریف نشدهای که در پیشپرسش ذکر شدهاند، آیا میخواهید آنها را به فرم ورودی کاربر اضافه کنید؟',
+ variableTable: {
+ key: 'کلید متغیر',
+ name: 'نام فیلد ورودی کاربر',
+ optional: 'اختیاری',
+ type: 'نوع ورودی',
+ action: 'اقدامات',
+ typeString: 'رشته',
+ typeSelect: 'انتخاب',
+ },
+ varKeyError: {
+ canNoBeEmpty: 'کلید متغیر نمیتواند خالی باشد',
+ tooLong: 'کلید متغیر: {{key}} طولانی است. نمیتواند بیش از 30 کاراکتر باشد',
+ notValid: 'کلید متغیر: {{key}} نامعتبر است. فقط میتواند شامل حروف، اعداد و زیرخط باشد',
+ notStartWithNumber: 'کلید متغیر: {{key}} نمیتواند با عدد شروع شود',
+ keyAlreadyExists: 'کلید متغیر: :{{key}} از قبل وجود دارد',
+ },
+ otherError: {
+ promptNoBeEmpty: 'پرس و جو نمیتواند خالی باشد',
+ historyNoBeEmpty: 'تاریخچه مکالمه باید در پرس و جو تنظیم شود',
+ queryNoBeEmpty: 'پرس و جو باید در پرس و جو تنظیم شود',
+ },
+ variableConig: {
+ 'addModalTitle': 'افزودن فیلد ورودی',
+ 'editModalTitle': 'ویرایش فیلد ورودی',
+ 'description': 'تنظیم برای متغیر {{varName}}',
+ 'fieldType': 'نوع فیلد',
+ 'string': 'متن کوتاه',
+ 'text-input': 'متن کوتاه',
+ 'paragraph': 'پاراگراف',
+ 'select': 'انتخاب',
+ 'number': 'عدد',
+ 'notSet': 'تنظیم نشده، سعی کنید {{input}} را در پرس و جو وارد کنید',
+ 'stringTitle': 'گزینههای جعبه متن فرم',
+ 'maxLength': 'حداکثر طول',
+ 'options': 'گزینهها',
+ 'addOption': 'افزودن گزینه',
+ 'apiBasedVar': 'متغیر مبتنی بر API',
+ 'varName': 'نام متغیر',
+ 'labelName': 'نام برچسب',
+ 'inputPlaceholder': 'لطفاً وارد کنید',
+ 'content': 'محتوا',
+ 'required': 'مورد نیاز',
+ 'errorMsg': {
+ varNameRequired: 'نام متغیر مورد نیاز است',
+ labelNameRequired: 'نام برچسب مورد نیاز است',
+ varNameCanBeRepeat: 'نام متغیر نمیتواند تکراری باشد',
+ atLeastOneOption: 'حداقل یک گزینه مورد نیاز است',
+ optionRepeat: 'گزینههای تکراری وجود دارد',
+ },
+ },
+ vision: {
+ name: 'بینایی',
+ description: 'فعال کردن بینایی به مدل اجازه میدهد تصاویر را دریافت کند و به سوالات مربوط به آنها پاسخ دهد.',
+ settings: 'تنظیمات',
+ visionSettings: {
+ title: 'تنظیمات بینایی',
+ resolution: 'وضوح',
+ resolutionTooltip: `وضوح پایین به مدل اجازه میدهد نسخه 512x512 کموضوح تصویر را دریافت کند و تصویر را با بودجه 65 توکن نمایش دهد. این به API اجازه میدهد پاسخهای سریعتری بدهد و توکنهای ورودی کمتری برای موارد استفاده که نیاز به جزئیات بالا ندارند مصرف کند.
+ \n
+ وضوح بالا ابتدا به مدل اجازه میدهد تصویر کموضوح را ببیند و سپس قطعات جزئیات تصویر ورودی را به عنوان مربعهای 512px ایجاد کند. هر کدام از قطعات جزئیات از بودجه توکن دو برابر استفاده میکنند که در مجموع 129 توکن است.`,
+ high: 'بالا',
+ low: 'پایین',
+ uploadMethod: 'روش بارگذاری',
+ both: 'هر دو',
+ localUpload: 'بارگذاری محلی',
+ url: 'URL',
+ uploadLimit: 'محدودیت بارگذاری',
+ },
+ },
+ voice: {
+ name: 'صدا',
+ defaultDisplay: 'صدا پیش فرض',
+ description: 'تنظیمات تبدیل متن به گفتار',
+ settings: 'تنظیمات',
+ voiceSettings: {
+ title: 'تنظیمات صدا',
+ language: 'زبان',
+ resolutionTooltip: 'پشتیبانی از زبان صدای تبدیل متن به گفتار.',
+ voice: 'صدا',
+ autoPlay: 'پخش خودکار',
+ autoPlayEnabled: 'روشن کردن',
+ autoPlayDisabled: 'خاموش کردن',
+ },
+ },
+ openingStatement: {
+ title: 'شروع مکالمه',
+ add: 'افزودن',
+ writeOpener: 'نوشتن آغازگر',
+ placeholder: 'پیام آغازگر خود را اینجا بنویسید، میتوانید از متغیرها استفاده کنید، سعی کنید {{variable}} را تایپ کنید.',
+ openingQuestion: 'سوالات آغازین',
+ noDataPlaceHolder: 'شروع مکالمه با کاربر میتواند به AI کمک کند تا ارتباط نزدیکتری با آنها برقرار کند.',
+ varTip: 'میتوانید از متغیرها استفاده کنید، سعی کنید {{variable}} را تایپ کنید',
+ tooShort: 'حداقل 20 کلمه از پرسش اولیه برای تولید نظرات آغازین مکالمه مورد نیاز است.',
+ notIncludeKey: 'پرسش اولیه شامل متغیر: {{key}} نمیشود. لطفاً آن را به پرسش اولیه اضافه کنید.',
+ },
+ modelConfig: {
+ model: 'مدل',
+ setTone: 'تنظیم لحن پاسخها',
+ title: 'مدل و پارامترها',
+ modeType: {
+ chat: 'چت',
+ completion: 'تکمیل',
+ },
+ },
+ inputs: {
+ title: 'اشکالزدایی و پیشنمایش',
+ noPrompt: 'سعی کنید پرسشهایی را در ورودی پیشپرسش بنویسید',
+ userInputField: 'فیلد ورودی کاربر',
+ noVar: 'مقدار متغیر را پر کنید، که به طور خودکار در کلمات پرس و جو در هر بار شروع یک جلسه جدید جایگزین میشود.',
+ chatVarTip: 'مقدار متغیر را پر کنید، که به طور خودکار در کلمات پرس و جو در هر بار شروع یک جلسه جدید جایگزین میشود',
+ completionVarTip: 'مقدار متغیر را پر کنید، که به طور خودکار در کلمات پرس و جو در هر بار ارسال سوال جایگزین میشود.',
+ previewTitle: 'پیشنمایش پرس و جو',
+ queryTitle: 'محتوای پرس و جو',
+ queryPlaceholder: 'لطفاً متن درخواست را وارد کنید.',
+ run: 'اجرا',
+ },
+ result: 'متن خروجی',
+ datasetConfig: {
+ settingTitle: 'تنظیمات بازیابی',
+ knowledgeTip: 'روی دکمه "+" کلیک کنید تا دانش اضافه شود',
+ retrieveOneWay: {
+ title: 'بازیابی N به 1',
+ description: 'بر اساس نیت کاربر و توصیفات دانش، عامل بهترین دانش را برای پرس و جو به طور خودکار انتخاب میکند. بهترین برای برنامههایی با دانش محدود و مشخص.',
+ },
+ retrieveMultiWay: {
+ title: 'بازیابی چند مسیره',
+ description: 'بر اساس نیت کاربر، از تمام دانش پرس و جو میکند، متنهای مرتبط از منابع چندگانه بازیابی میکند و بهترین نتایج مطابقت با پرس و جوی کاربر را پس از مرتبسازی مجدد انتخاب میکند.',
+ },
+ rerankModelRequired: 'مدل مرتبسازی مجدد مورد نیاز است',
+ params: 'پارامترها',
+ top_k: 'Top K',
+ top_kTip: 'برای فیلتر کردن تکههایی که بیشترین شباهت به سوالات کاربر دارند استفاده میشود. سیستم همچنین به طور دینامیک مقدار Top K را بر اساس max_tokens مدل انتخاب شده تنظیم میکند.',
+ score_threshold: 'آستانه نمره',
+ score_thresholdTip: 'برای تنظیم آستانه شباهت برای فیلتر کردن تکهها استفاده میشود.',
+ retrieveChangeTip: 'تغییر حالت شاخص و حالت بازیابی ممکن است بر برنامههای مرتبط با این دانش تأثیر بگذارد.',
+ },
+ debugAsSingleModel: 'اشکالزدایی به عنوان مدل تک',
+ debugAsMultipleModel: 'اشکالزدایی به عنوان مدل چندگانه',
+ duplicateModel: 'تکراری',
+ publishAs: 'انتشار به عنوان',
+ assistantType: {
+ name: 'نوع دستیار',
+ chatAssistant: {
+ name: 'دستیار پایه',
+ description: 'ساخت دستیار مبتنی بر چت با استفاده از مدل زبان بزرگ',
+ },
+ agentAssistant: {
+ name: 'دستیار عامل',
+ description: 'ساخت یک عامل هوشمند که میتواند ابزارها را به طور خودکار برای تکمیل وظایف انتخاب کند',
+ },
+ },
+ agent: {
+ agentMode: 'حالت عامل',
+ agentModeDes: 'تنظیم نوع حالت استنتاج برای عامل',
+ agentModeType: {
+ ReACT: 'ReAct',
+ functionCall: 'فراخوانی تابع',
+ },
+ setting: {
+ name: 'تنظیمات عامل',
+ description: 'تنظیمات دستیار عامل به شما اجازه میدهد حالت عامل و ویژگیهای پیشرفته مانند پرسشهای ساخته شده را تنظیم کنید، فقط در نوع عامل موجود است.',
+ maximumIterations: {
+ name: 'حداکثر تکرارها',
+ description: 'محدود کردن تعداد تکرارهایی که دستیار عامل میتواند اجرا کند',
+ },
+ },
+ buildInPrompt: 'پرسشهای ساخته شده',
+ firstPrompt: 'اولین پرسش',
+ nextIteration: 'تکرار بعدی',
+ promptPlaceholder: 'پرسش خود را اینجا بنویسید',
+ tools: {
+ name: 'ابزارها',
+ description: 'استفاده از ابزارها میتواند قابلیتهای LLM را گسترش دهد، مانند جستجو در اینترنت یا انجام محاسبات علمی',
+ enabled: 'فعال',
+ },
+ },
+ },
+}
+
+export default translation
diff --git a/web/i18n/fa-IR/app-log.ts b/web/i18n/fa-IR/app-log.ts
new file mode 100644
index 00000000000000..80919c1c3a3d3b
--- /dev/null
+++ b/web/i18n/fa-IR/app-log.ts
@@ -0,0 +1,91 @@
/**
 * fa-IR (Persian) locale strings for the application "Logs" pages:
 * the conversation/workflow log table, the log detail drawer and the
 * filter bar.
 *
 * Keys mirror the reference en-US locale file of the same name.
 * {{placeholder}} tokens are i18n interpolation variables and must be
 * kept verbatim.
 */
const translation = {
  title: 'لاگها',
  description: 'لاگها وضعیت اجرایی برنامه را ثبت میکنند، شامل ورودیهای کاربر و پاسخهای هوش مصنوعی.',
  // Date-format pattern consumed by the date library, not user-visible prose.
  dateTimeFormat: 'MM/DD/YYYY hh:mm A',
  // Log list table: column headers, pager labels and empty-state copy.
  table: {
    header: {
      time: 'زمان',
      endUser: 'کاربر نهایی',
      input: 'ورودی',
      output: 'خروجی',
      summary: 'عنوان',
      messageCount: 'تعداد پیام',
      userRate: 'امتیاز کاربر',
      adminRate: 'امتیاز اپراتور',
      startTime: 'زمان شروع',
      status: 'وضعیت',
      runtime: 'زمان اجرا',
      tokens: 'توکنها',
      user: 'کاربر نهایی',
      version: 'نسخه',
    },
    pagination: {
      previous: 'قبلی',
      next: 'بعدی',
    },
    empty: {
      noChat: 'هنوز مکالمهای وجود ندارد',
      noOutput: 'خروجی وجود ندارد',
      element: {
        title: 'کسی هست؟',
        content: 'در اینجا تعاملات بین کاربران نهایی و برنامههای هوش مصنوعی را مشاهده و حاشیهنویسی کنید تا دقت هوش مصنوعی بهبود یابد. میتوانید به اشتراک بگذارید یا برنامه وب را تست کنید و سپس به این صفحه برگردید.',
      },
    },
  },
  // Single log entry detail panel.
  detail: {
    time: 'زمان',
    conversationId: 'شناسه مکالمه',
    promptTemplate: 'قالب درخواست',
    promptTemplateBeforeChat: 'قالب درخواست قبل از چت · به عنوان پیام سیستمی',
    annotationTip: 'بهبودها توسط {{user}} علامتگذاری شده است',
    // Intentionally empty in this locale; presumably mirrors en-US — verify.
    timeConsuming: '',
    second: 'ثانیه',
    tokenCost: 'توکن مصرفی',
    loading: 'در حال بارگذاری',
    operation: {
      like: 'پسندیدن',
      dislike: 'نپسندیدن',
      addAnnotation: 'اضافه کردن بهبود',
      editAnnotation: 'ویرایش بهبود',
      annotationPlaceholder: 'پاسخ مورد انتظاری که میخواهید هوش مصنوعی بدهد را وارد کنید، که میتواند برای بهبود مدل و کیفیت تولید متن در آینده استفاده شود.',
    },
    variables: 'متغیرها',
    uploadImages: 'تصاویر آپلود شده',
  },
  // Time-range and annotation filters above the table.
  filter: {
    period: {
      today: 'امروز',
      last7days: '7 روز گذشته',
      last4weeks: '4 هفته گذشته',
      last3months: '3 ماه گذشته',
      last12months: '12 ماه گذشته',
      monthToDate: 'از ابتدای ماه تاکنون',
      quarterToDate: 'از ابتدای فصل تاکنون',
      yearToDate: 'از ابتدای سال تاکنون',
      allTime: 'همه زمانها',
    },
    annotation: {
      all: 'همه',
      annotated: 'بهبودهای حاشیهنویسی شده ({{count}} آیتم)',
      not_annotated: 'حاشیهنویسی نشده',
    },
  },
  workflowTitle: 'لاگهای جریان کاری',
  workflowSubtitle: 'لاگ عملیات خودکار را ثبت کرده است.',
  runDetail: {
    title: 'لاگ مکالمه',
    workflowTitle: 'جزئیات لاگ',
  },
  promptLog: 'لاگ درخواست',
  agentLog: 'لاگ عامل',
  viewLog: 'مشاهده لاگ',
  // Per-iteration breakdown of an agent run inside the log detail.
  agentLogDetail: {
    agentMode: 'حالت عامل',
    toolUsed: 'ابزار استفاده شده',
    iterations: 'تکرارها',
    iteration: 'تکرار',
    finalProcessing: 'پردازش نهایی',
  },
}

export default translation
diff --git a/web/i18n/fa-IR/app-overview.ts b/web/i18n/fa-IR/app-overview.ts
new file mode 100644
index 00000000000000..1bbd7a02830a01
--- /dev/null
+++ b/web/i18n/fa-IR/app-overview.ts
@@ -0,0 +1,156 @@
/**
 * fa-IR (Persian) locale strings for the app "Overview" page:
 * onboarding/API-key banners, WebApp settings, embed/QR/customize
 * dialogs, backend API info and the analytics cards.
 *
 * Keys mirror the reference en-US locale file of the same name.
 * {{placeholder}} tokens are i18n interpolation variables and must be
 * kept verbatim.
 */
const translation = {
  // First-run banner asking for an API key.
  welcome: {
    firstStepTip: 'برای شروع،',
    enterKeyTip: 'کلید API خود را در زیر وارد کنید',
    getKeyTip: 'کلید API خود را از داشبورد OpenAI دریافت کنید',
    placeholder: 'کلید API خود را وارد کنید (مثلاً sk-xxxx)',
  },
  // Trial-quota / self-host provider-setup notices.
  apiKeyInfo: {
    cloud: {
      trial: {
        title: 'شما از سهمیه آزمایشی {{providerName}} استفاده میکنید.',
        description: 'سهمیه آزمایشی برای اهداف تست شما ارائه شده است. قبل از اینکه سهمیه آزمایشی تمام شود، لطفاً ارائهدهنده مدل خود را تنظیم کنید یا سهمیه اضافی خریداری کنید.',
      },
      exhausted: {
        title: 'سهمیه آزمایشی شما تمام شده است، لطفاً کلید API خود را تنظیم کنید.',
        description: 'شما سهمیه آزمایشی خود را مصرف کردهاید. لطفاً ارائهدهنده مدل خود را تنظیم کنید یا سهمیه اضافی خریداری کنید.',
      },
    },
    selfHost: {
      title: {
        row1: 'برای شروع،',
        row2: 'ابتدا ارائهدهنده مدل خود را تنظیم کنید.',
      },
    },
    callTimes: 'تعداد تماسها',
    usedToken: 'توکنهای مصرفشده',
    setAPIBtn: 'برو به تنظیمات ارائهدهنده مدل',
    tryCloud: 'یا نسخه ابری Dify با سهمیه رایگان را امتحان کنید',
  },
  // WebApp card: public address, settings dialog, embed/QR/customize.
  overview: {
    title: 'نمای کلی',
    appInfo: {
      explanation: 'برنامه وب AI آماده به کار',
      accessibleAddress: 'آدرس عمومی',
      preview: 'پیشنمایش',
      regenerate: 'تولید مجدد',
      regenerateNotice: 'آیا میخواهید آدرس عمومی را دوباره تولید کنید؟',
      preUseReminder: 'لطفاً قبل از ادامه، WebApp را فعال کنید.',
      settings: {
        entry: 'تنظیمات',
        title: 'تنظیمات WebApp',
        webName: 'نام WebApp',
        webDesc: 'توضیحات WebApp',
        webDescTip: 'این متن در سمت مشتری نمایش داده میشود و راهنماییهای اولیه در مورد نحوه استفاده از برنامه را ارائه میدهد',
        webDescPlaceholder: 'توضیحات WebApp را وارد کنید',
        language: 'زبان',
        workflow: {
          title: 'مراحل کاری',
          show: 'نمایش',
          hide: 'مخفی کردن',
        },
        chatColorTheme: 'تم رنگی چت',
        chatColorThemeDesc: 'تم رنگی چتبات را تنظیم کنید',
        chatColorThemeInverted: 'معکوس',
        invalidHexMessage: 'مقدار هگز نامعتبر',
        more: {
          entry: 'نمایش تنظیمات بیشتر',
          copyright: 'حق نسخهبرداری',
          copyRightPlaceholder: 'نام نویسنده یا سازمان را وارد کنید',
          privacyPolicy: 'سیاست حفظ حریم خصوصی',
          privacyPolicyPlaceholder: 'لینک سیاست حفظ حریم خصوصی را وارد کنید',
          // NOTE(review): trailing 'Privacy Policy .' appears to sit around a rendered link — confirm against the consuming component.
          privacyPolicyTip: 'به بازدیدکنندگان کمک میکند تا بفهمند برنامه چه دادههایی را جمعآوری میکند، به سیاست حفظ حریم خصوصی Dify نگاه کنید Privacy Policy .',
          customDisclaimer: 'سلب مسئولیت سفارشی',
          customDisclaimerPlaceholder: 'متن سلب مسئولیت سفارشی را وارد کنید',
          customDisclaimerTip: 'متن سلب مسئولیت سفارشی در سمت مشتری نمایش داده میشود و اطلاعات بیشتری درباره برنامه ارائه میدهد',
        },
      },
      embedded: {
        entry: 'جاسازی شده',
        title: 'جاسازی در وبسایت',
        explanation: 'روشهای جاسازی برنامه چت در وبسایت خود را انتخاب کنید',
        iframe: 'برای افزودن برنامه چت در هرجای وبسایت خود، این iframe را به کد HTML خود اضافه کنید.',
        scripts: 'برای افزودن برنامه چت به گوشه پایین سمت راست وبسایت خود، این کد را به HTML خود اضافه کنید.',
        chromePlugin: 'نصب افزونه Chrome Chatbot Dify',
        copied: 'کپی شد',
        copy: 'کپی',
      },
      qrcode: {
        title: 'کد QR لینک',
        scan: 'اسکن برای اشتراکگذاری',
        download: 'دانلود کد QR',
      },
      customize: {
        way: 'راه',
        entry: 'سفارشیسازی',
        title: 'سفارشیسازی WebApp AI',
        explanation: 'شما میتوانید ظاهر جلویی برنامه وب را برای برآوردن نیازهای سناریو و سبک خود سفارشی کنید.',
        way1: {
          name: 'کلاینت را شاخه کنید، آن را تغییر دهید و در Vercel مستقر کنید (توصیه میشود)',
          step1: 'کلاینت را شاخه کنید و آن را تغییر دهید',
          step1Tip: 'برای شاخه کردن کد منبع به حساب GitHub خود و تغییر کد اینجا کلیک کنید',
          step1Operation: 'Dify-WebClient',
          step2: 'استقرار در Vercel',
          step2Tip: 'برای وارد کردن مخزن به Vercel و استقرار اینجا کلیک کنید',
          step2Operation: 'وارد کردن مخزن',
          step3: 'پیکربندی متغیرهای محیطی',
          step3Tip: 'متغیرهای محیطی زیر را در Vercel اضافه کنید',
        },
        way2: {
          name: 'نوشتن کد سمت کلاینت برای فراخوانی API و استقرار آن بر روی سرور',
          operation: 'مستندات',
        },
      },
    },
    apiInfo: {
      title: 'API سرویس بکاند',
      explanation: 'به راحتی در برنامه خود یکپارچه میشود',
      accessibleAddress: 'نقطه پایانی سرویس API',
      doc: 'مرجع API',
    },
    status: {
      running: 'در حال سرویسدهی',
      disable: 'غیرفعال',
    },
  },
  // Analytics cards (message counts, token usage, latency, etc.).
  analysis: {
    title: 'تحلیل',
    ms: 'میلیثانیه',
    tokenPS: 'توکن/ثانیه',
    totalMessages: {
      title: 'کل پیامها',
      explanation: 'تعداد تعاملات روزانه با AI؛ مهندسی/اشکالزدایی دستورات مستثنی هستند.',
    },
    activeUsers: {
      title: 'کاربران فعال',
      explanation: 'کاربران منحصر به فردی که در پرسش و پاسخ با AI شرکت میکنند؛ مهندسی/اشکالزدایی دستورات مستثنی هستند.',
    },
    tokenUsage: {
      title: 'استفاده از توکن',
      explanation: 'مصرف روزانه توکنهای مدل زبان برای برنامه را نشان میدهد، که برای کنترل هزینهها مفید است.',
      consumed: 'مصرفشده',
    },
    avgSessionInteractions: {
      title: 'میانگین تعاملات جلسه',
      explanation: 'تعداد تعاملات پیوسته کاربر-AI؛ برای برنامههای مبتنی بر گفتگو.',
    },
    avgUserInteractions: {
      title: 'میانگین تعاملات کاربران',
      explanation: 'تکرار استفاده روزانه کاربران را نشان میدهد. این معیار چسبندگی کاربران را نشان میدهد.',
    },
    userSatisfactionRate: {
      title: 'نرخ رضایت کاربران',
      explanation: 'تعداد لایکها به ازای هر ۱۰۰۰ پیام. این نشاندهنده نسبت پاسخهایی است که کاربران به شدت رضایت دارند.',
    },
    avgResponseTime: {
      title: 'میانگین زمان پاسخ',
      explanation: 'زمان (میلیثانیه) برای پردازش/پاسخ AI؛ برای برنامههای مبتنی بر متن.',
    },
    tps: {
      title: 'سرعت خروجی توکن',
      explanation: 'عملکرد مدل زبان بزرگ را اندازهگیری میکند. سرعت خروجی توکنهای مدل زبان بزرگ از آغاز درخواست تا تکمیل خروجی را بشمارید.',
    },
  },
}

export default translation
diff --git a/web/i18n/fa-IR/app.ts b/web/i18n/fa-IR/app.ts
new file mode 100644
index 00000000000000..8322ff5a14a158
--- /dev/null
+++ b/web/i18n/fa-IR/app.ts
@@ -0,0 +1,130 @@
/**
 * fa-IR (Persian) locale strings for the app list / app creation flow:
 * create & duplicate dialogs, DSL import/export, edit/delete, the
 * basic↔workflow switch dialog and the tracing (LLMOps) configuration.
 *
 * Keys mirror the reference en-US locale file of the same name.
 * {{placeholder}} tokens are i18n interpolation variables and must be
 * kept verbatim.
 */
const translation = {
  createApp: 'ایجاد برنامه',
  types: {
    all: 'همه',
    chatbot: 'چتبات',
    agent: 'نماینده',
    workflow: 'گردش کار',
    completion: 'تکمیل',
  },
  duplicate: 'تکرار',
  duplicateTitle: 'تکرار برنامه',
  export: 'صادر کردن DSL',
  exportFailed: 'صادر کردن DSL ناموفق بود.',
  importDSL: 'وارد کردن فایل DSL',
  createFromConfigFile: 'ایجاد از فایل DSL',
  importFromDSL: 'وارد کردن از DSL',
  importFromDSLFile: 'از فایل DSL',
  importFromDSLUrl: 'از URL',
  importFromDSLUrlPlaceholder: 'لینک DSL را اینجا بچسبانید',
  deleteAppConfirmTitle: 'آیا این برنامه حذف شود؟',
  deleteAppConfirmContent:
    'حذف برنامه غیرقابل برگشت است. کاربران دیگر قادر به دسترسی به برنامه شما نخواهند بود و تمام تنظیمات و گزارشات درخواستها به صورت دائم حذف خواهند شد.',
  appDeleted: 'برنامه حذف شد',
  appDeleteFailed: 'حذف برنامه ناموفق بود',
  join: 'پیوستن به جامعه',
  communityIntro:
    'در کانالهای مختلف با اعضای تیم، مشارکتکنندگان و توسعهدهندگان بحث کنید.',
  roadmap: 'نقشه راه ما را ببینید',
  // "Create app" dialog: type picker, name/description, templates.
  newApp: {
    startFromBlank: 'ایجاد از خالی',
    startFromTemplate: 'ایجاد از قالب',
    captionAppType: 'چه نوع برنامهای میخواهید ایجاد کنید؟',
    chatbotDescription: 'ساخت برنامهای مبتنی بر چت. این برنامه از قالب پرسش و پاسخ استفاده میکند و امکان چندین دور مکالمه مداوم را فراهم میکند.',
    completionDescription: 'ساخت برنامهای که متن با کیفیت بالا بر اساس درخواستها تولید میکند، مانند تولید مقالات، خلاصهها، ترجمهها و بیشتر.',
    completionWarning: 'این نوع برنامه دیگر پشتیبانی نمیشود.',
    agentDescription: 'ساخت نماینده هوشمند که میتواند ابزارها را برای انجام وظایف به طور خودمختار انتخاب کند',
    workflowDescription: 'ساخت برنامهای که متن با کیفیت بالا بر اساس گردش کار با درجه بالای سفارشیسازی تولید میکند. مناسب برای کاربران با تجربه.',
    workflowWarning: 'در حال حاضر در نسخه بتا',
    chatbotType: 'روش سازماندهی چتبات',
    basic: 'اساسی',
    basicTip: 'برای مبتدیان، میتوان بعداً به Chatflow تغییر داد',
    basicFor: 'برای مبتدیان',
    basicDescription: 'سازماندهی اساسی به شما اجازه میدهد تا یک برنامه چتبات را با تنظیمات ساده و بدون امکان تغییر درخواستهای داخلی سازماندهی کنید. مناسب برای مبتدیان است.',
    advanced: 'Chatflow',
    advancedFor: 'برای کاربران پیشرفته',
    advancedDescription: 'سازماندهی گردش کار، چتباتها را به صورت گردش کار سازماندهی میکند و درجه بالایی از سفارشیسازی، از جمله امکان ویرایش درخواستهای داخلی را فراهم میکند. مناسب برای کاربران با تجربه است.',
    captionName: 'آیکون و نام برنامه',
    appNamePlaceholder: 'به برنامه خود یک نام بدهید',
    captionDescription: 'توضیحات',
    appDescriptionPlaceholder: 'توضیحات برنامه را وارد کنید',
    useTemplate: 'استفاده از این قالب',
    previewDemo: 'پیشنمایش دمو',
    chatApp: 'دستیار',
    chatAppIntro:
      'میخواهم یک برنامه مبتنی بر چت بسازم. این برنامه از قالب پرسش و پاسخ استفاده میکند و امکان چندین دور مکالمه مداوم را فراهم میکند.',
    agentAssistant: 'دستیار نماینده جدید',
    completeApp: 'تولید کننده متن',
    completeAppIntro:
      'میخواهم برنامهای بسازم که متن با کیفیت بالا بر اساس درخواستها تولید کند، مانند تولید مقالات، خلاصهها، ترجمهها و بیشتر.',
    showTemplates: 'میخواهم از یک قالب انتخاب کنم',
    hideTemplates: 'بازگشت به انتخاب حالت',
    // NOTE(review): capitalized keys 'Create'/'Cancel' look inconsistent but match the shared key scheme — do not rename.
    Create: 'ایجاد',
    Cancel: 'لغو',
    nameNotEmpty: 'نام نمیتواند خالی باشد',
    appTemplateNotSelected: 'لطفاً یک قالب انتخاب کنید',
    appTypeRequired: 'لطفاً نوع برنامه را انتخاب کنید',
    appCreated: 'برنامه ایجاد شد',
    appCreateFailed: 'ایجاد برنامه ناموفق بود',
  },
  editApp: 'ویرایش اطلاعات',
  editAppTitle: 'ویرایش اطلاعات برنامه',
  editDone: 'اطلاعات برنامه بهروزرسانی شد',
  editFailed: 'بهروزرسانی اطلاعات برنامه ناموفق بود',
  emoji: {
    ok: 'باشه',
    cancel: 'لغو',
  },
  // Basic → workflow orchestration switch dialog.
  switch: 'تغییر به سازماندهی گردش کار',
  switchTipStart: 'یک نسخه جدید از برنامه برای شما ایجاد خواهد شد و نسخه جدید به سازماندهی گردش کار تغییر خواهد کرد. نسخه جدید ',
  switchTip: 'اجازه نمیدهد',
  switchTipEnd: ' تغییر به سازماندهی اساسی بازگردد.',
  switchLabel: 'نسخه برنامه که ایجاد میشود',
  removeOriginal: 'حذف برنامه اصلی',
  switchStart: 'شروع تغییر',
  typeSelector: {
    all: 'همه انواع',
    chatbot: 'چتبات',
    agent: 'نماینده',
    workflow: 'گردش کار',
    completion: 'تکمیل',
  },
  // Third-party LLMOps tracing (LangSmith / Langfuse) configuration.
  tracing: {
    title: 'ردیابی عملکرد برنامه',
    description: 'پیکربندی ارائهدهنده شخص ثالث LLMOps و ردیابی عملکرد برنامه.',
    config: 'پیکربندی',
    collapse: 'بستن',
    expand: 'باز کردن',
    tracing: 'ردیابی',
    disabled: 'غیرفعال',
    disabledTip: 'لطفاً ابتدا ارائهدهنده را پیکربندی کنید',
    enabled: 'در حال خدمت',
    tracingDescription: 'ثبت کامل متن اجرای برنامه، از جمله تماسهای LLM، متن، درخواستهای HTTP و بیشتر، به یک پلتفرم ردیابی شخص ثالث.',
    configProviderTitle: {
      configured: 'پیکربندی شده',
      notConfigured: 'برای فعالسازی ردیابی ارائهدهنده را پیکربندی کنید',
      moreProvider: 'ارائهدهندگان بیشتر',
    },
    langsmith: {
      title: 'LangSmith',
      description: 'یک پلتفرم همهکاره برای هر مرحله از چرخه عمر برنامههای مبتنی بر LLM.',
    },
    langfuse: {
      title: 'Langfuse',
      description: 'ردیابی، ارزیابی، مدیریت درخواستها و معیارها برای رفع اشکال و بهبود برنامه LLM شما.',
    },
    inUse: 'در حال استفاده',
    configProvider: {
      title: 'پیکربندی',
      placeholder: 'کلید {{key}} خود را وارد کنید',
      project: 'پروژه',
      publicKey: 'کلید عمومی',
      secretKey: 'کلید محرمانه',
      viewDocsLink: 'مشاهده مستندات {{key}}',
      removeConfirmTitle: 'حذف پیکربندی {{key}}؟',
      removeConfirmContent: 'پیکربندی فعلی در حال استفاده است، حذف آن ویژگی ردیابی را غیرفعال خواهد کرد.',
    },
  },
}

export default translation
diff --git a/web/i18n/fa-IR/billing.ts b/web/i18n/fa-IR/billing.ts
new file mode 100644
index 00000000000000..480c31f742c3a4
--- /dev/null
+++ b/web/i18n/fa-IR/billing.ts
@@ -0,0 +1,118 @@
/**
 * fa-IR (Persian) locale strings for billing: plan comparison table,
 * upgrade prompts and quota-exhausted notices.
 *
 * Keys mirror the reference en-US locale file of the same name.
 * {{placeholder}} tokens are i18n interpolation variables and must be
 * kept verbatim.
 */
const translation = {
  currentPlan: 'طرح فعلی',
  upgradeBtn: {
    plain: 'ارتقاء طرح',
    encourage: 'هم اکنون ارتقاء دهید',
    encourageShort: 'ارتقاء دهید',
  },
  viewBilling: 'مدیریت صورتحسابها و اشتراکها',
  buyPermissionDeniedTip: 'لطفاً با مدیر سازمان خود تماس بگیرید تا اشتراک تهیه کنید',
  // Labels shared across the plan comparison table.
  plansCommon: {
    title: 'یک طرح مناسب برای خود انتخاب کنید',
    yearlyTip: 'با اشتراک سالانه 2 ماه رایگان دریافت کنید!',
    mostPopular: 'محبوبترین',
    planRange: {
      monthly: 'ماهانه',
      yearly: 'سالانه',
    },
    month: 'ماه',
    year: 'سال',
    // Trailing space is deliberate: a value is concatenated after this label.
    save: 'صرفهجویی کنید ',
    free: 'رایگان',
    currentPlan: 'طرح فعلی',
    contractSales: 'تماس با فروش',
    contractOwner: 'تماس با مدیر تیم',
    startForFree: 'رایگان شروع کنید',
    getStartedWith: 'شروع کنید با ',
    contactSales: 'تماس با فروش',
    talkToSales: 'صحبت با فروش',
    modelProviders: 'ارائهدهندگان مدل',
    teamMembers: 'اعضای تیم',
    annotationQuota: 'سهمیه حاشیهنویسی',
    buildApps: 'ساخت اپلیکیشنها',
    vectorSpace: 'فضای وکتور',
    vectorSpaceBillingTooltip: 'هر 1 مگابایت میتواند حدود 1.2 میلیون کاراکتر از دادههای وکتور شده را ذخیره کند (براساس تخمین با استفاده از OpenAI Embeddings، متفاوت بر اساس مدلها).',
    vectorSpaceTooltip: 'فضای وکتور سیستم حافظه بلند مدت است که برای درک دادههای شما توسط LLMها مورد نیاز است.',
    documentsUploadQuota: 'سهمیه بارگذاری مستندات',
    documentProcessingPriority: 'اولویت پردازش مستندات',
    documentProcessingPriorityTip: 'برای اولویت پردازش بالاتر مستندات، لطفاً طرح خود را ارتقاء دهید.',
    documentProcessingPriorityUpgrade: 'دادههای بیشتری را با دقت بالاتر و سرعت بیشتر پردازش کنید.',
    // Quoted keys are required: 'top-priority' contains a hyphen.
    priority: {
      'standard': 'استاندارد',
      'priority': 'اولویت',
      'top-priority': 'اولویت بالا',
    },
    logsHistory: 'تاریخچه گزارشات',
    customTools: 'ابزارهای سفارشی',
    unavailable: 'غیرقابل دسترس',
    days: 'روز',
    unlimited: 'نامحدود',
    support: 'پشتیبانی',
    supportItems: {
      communityForums: 'انجمنهای اجتماعی',
      emailSupport: 'پشتیبانی ایمیل',
      priorityEmail: 'پشتیبانی ایمیل و چت با اولویت',
      logoChange: 'تغییر لوگو',
      SSOAuthentication: 'تأیید هویت SSO',
      personalizedSupport: 'پشتیبانی شخصیسازی شده',
      dedicatedAPISupport: 'پشتیبانی API اختصاصی',
      customIntegration: 'یکپارچهسازی و پشتیبانی سفارشی',
      ragAPIRequest: 'درخواستهای API RAG',
      bulkUpload: 'بارگذاری دستهای مستندات',
      agentMode: 'حالت Agent',
      workflow: 'جریان کار',
      llmLoadingBalancing: 'توزیع بار LLM',
      llmLoadingBalancingTooltip: 'اضافه کردن چندین کلید API به مدلها، به طور مؤثر از محدودیتهای نرخ API عبور میکند.',
    },
    comingSoon: 'به زودی',
    member: 'عضو',
    memberAfter: 'عضو',
    messageRequest: {
      title: 'اعتبارات پیام',
      tooltip: 'سهمیههای فراخوانی پیام برای طرحهای مختلف با استفاده از مدلهای OpenAI (به جز gpt4). پیامهای بیش از حد محدودیت از کلید API OpenAI شما استفاده میکنند.',
    },
    annotatedResponse: {
      title: 'محدودیتهای سهمیه حاشیهنویسی',
      tooltip: 'ویرایش دستی و حاشیهنویسی پاسخها، قابلیتهای پرسش و پاسخ با کیفیت بالا و قابل تنظیم برای اپلیکیشنها را فراهم میکند. (فقط در اپلیکیشنهای چت اعمال میشود)',
    },
    ragAPIRequestTooltip: 'به تعداد درخواستهای API که فقط قابلیتهای پردازش پایگاه دانش Dify را فراخوانی میکنند اشاره دارد.',
    receiptInfo: 'فقط صاحب تیم و مدیر تیم میتوانند اشتراک تهیه کنند و اطلاعات صورتحساب را مشاهده کنند',
  },
  // Per-plan marketing copy.
  plans: {
    sandbox: {
      name: 'محیط آزمایشی',
      description: '200 بار آزمایش رایگان GPT',
      includesTitle: 'شامل:',
    },
    professional: {
      name: 'حرفهای',
      description: 'برای افراد و تیمهای کوچک برای باز کردن قدرت بیشتر به طور مقرون به صرفه.',
      includesTitle: 'همه چیز در طرح رایگان، به علاوه:',
    },
    team: {
      name: 'تیم',
      description: 'همکاری بدون محدودیت و لذت بردن از عملکرد برتر.',
      includesTitle: 'همه چیز در طرح حرفهای، به علاوه:',
    },
    enterprise: {
      name: 'سازمانی',
      description: 'دریافت کاملترین قابلیتها و پشتیبانی برای سیستمهای بزرگ و بحرانی.',
      includesTitle: 'همه چیز در طرح تیم، به علاوه:',
    },
  },
  // Quota-exhausted banners.
  vectorSpace: {
    fullTip: 'فضای وکتور پر است.',
    fullSolution: 'طرح خود را ارتقاء دهید تا فضای بیشتری دریافت کنید.',
  },
  apps: {
    fullTipLine1: 'طرح خود را ارتقاء دهید تا',
    fullTipLine2: 'اپلیکیشنهای بیشتری بسازید.',
  },
  annotatedResponse: {
    fullTipLine1: 'طرح خود را ارتقاء دهید تا',
    fullTipLine2: 'مکالمات بیشتری را حاشیهنویسی کنید.',
    quotaTitle: 'سهمیه پاسخ حاشیهنویسی',
  },
}

export default translation
diff --git a/web/i18n/fa-IR/common.ts b/web/i18n/fa-IR/common.ts
new file mode 100644
index 00000000000000..2b1c4647db4eec
--- /dev/null
+++ b/web/i18n/fa-IR/common.ts
@@ -0,0 +1,572 @@
+const translation = {
+ api: {
+ success: 'موفقیت',
+ actionSuccess: 'عملیات موفق',
+ saved: 'ذخیره شد',
+ create: 'ایجاد شد',
+ remove: 'حذف شد',
+ },
+ operation: {
+ create: 'ایجاد',
+ confirm: 'تایید',
+ cancel: 'لغو',
+ clear: 'پاک کردن',
+ save: 'ذخیره',
+ saveAndEnable: 'ذخیره و فعال سازی',
+ edit: 'ویرایش',
+ add: 'افزودن',
+ added: 'اضافه شد',
+ refresh: 'تازه‌سازی',
+ reset: 'بازنشانی',
+ search: 'جستجو',
+ change: 'تغییر',
+ remove: 'حذف',
+ send: 'ارسال',
+ copy: 'کپی',
+ lineBreak: 'خط جدید',
+ sure: 'مطمئن هستم',
+ download: 'دانلود',
+ delete: 'حذف',
+ settings: 'تنظیمات',
+ setup: 'راه اندازی',
+ getForFree: 'دریافت رایگان',
+ reload: 'بارگذاری مجدد',
+ ok: 'تایید',
+ log: 'گزارش',
+ learnMore: 'اطلاعات بیشتر',
+ params: 'پارامترها',
+ duplicate: 'تکرار',
+ rename: 'تغییر نام',
+ },
+ errorMsg: {
+ fieldRequired: '{{field}} الزامی است',
+ urlError: 'آدرس باید با http:// یا https:// شروع شود',
+ },
+ placeholder: {
+ input: 'لطفا وارد کنید',
+ select: 'لطفا انتخاب کنید',
+ },
+ voice: {
+ language: {
+ zhHans: 'چینی',
+ zhHant: 'چینی سنتی',
+ enUS: 'انگلیسی',
+ deDE: 'آلمانی',
+ frFR: 'فرانسوی',
+ esES: 'اسپانیایی',
+ itIT: 'ایتالیایی',
+ thTH: 'تایلندی',
+ idID: 'اندونزیایی',
+ jaJP: 'ژاپنی',
+ koKR: 'کرهای',
+ ptBR: 'پرتغالی',
+ ruRU: 'روسی',
+ ukUA: 'اوکراینی',
+ viVN: 'ویتنامی',
+ plPL: 'لهستانی',
+ },
+ },
+ unit: {
+ char: 'کاراکتر',
+ },
+ actionMsg: {
+ noModification: 'در حال حاضر تغییری وجود ندارد.',
+ modifiedSuccessfully: 'با موفقیت تغییر یافت',
+ modifiedUnsuccessfully: 'تغییر ناموفق بود',
+ copySuccessfully: 'با موفقیت کپی شد',
+ paySucceeded: 'پرداخت موفق',
+ payCancelled: 'پرداخت لغو شد',
+ generatedSuccessfully: 'با موفقیت تولید شد',
+ generatedUnsuccessfully: 'تولید ناموفق بود',
+ },
+ model: {
+ params: {
+ temperature: 'دما',
+ temperatureTip:
+ 'تصادفی بودن را کنترل میکند: کاهش آن منجر به تکمیلهای کمتر تصادفی میشود. با نزدیک شدن دما به صفر، مدل قطعی و تکراری میشود.',
+ top_p: 'بالاترین P',
+ top_pTip:
+ 'تنوع را از طریق نمونهگیری هسته کنترل میکند: 0.5 به این معنی است که نیمی از همه گزینههای وزندار احتمالی در نظر گرفته میشوند.',
+ presence_penalty: 'جریمه حضور',
+ presence_penaltyTip:
+ 'چقدر توکنهای جدید را بر اساس اینکه آیا در متن تاکنون ظاهر شدهاند جریمه کنیم.\nاحتمال مدل برای صحبت در مورد موضوعات جدید را افزایش میدهد.',
+ frequency_penalty: 'جریمه تکرار',
+ frequency_penaltyTip:
+ 'چقدر توکنهای جدید را بر اساس فراوانی موجود آنها در متن تاکنون جریمه کنیم.\nاحتمال تکرار دقیق همان خط توسط مدل را کاهش میدهد.',
+ max_tokens: 'حداکثر توکن',
+ max_tokensTip:
+ 'برای محدود کردن حداکثر طول پاسخ، در توکنها استفاده میشود. \nمقادیر بزرگتر ممکن است فضای باقیمانده برای کلمات راهنما، گزارشهای چت و دانش را محدود کند. \nتوصیه میشود آن را کمتر از دو سوم تنظیم کنید\ngpt-4-1106-preview، gpt-4-vision-preview حداکثر توکن (ورودی 128k خروجی 4k)',
+ maxTokenSettingTip: 'تنظیم حداکثر توکن شما بالاست، که ممکن است فضا را برای راهنماها، پرس و جوها و دادهها محدود کند. در نظر بگیرید آن را زیر 2/3 تنظیم کنید.',
+ setToCurrentModelMaxTokenTip: 'حداکثر توکن به 80٪ حداکثر توکن مدل فعلی {{maxToken}} بهروزرسانی شد.',
+ stop_sequences: 'توالیهای توقف',
+ stop_sequencesTip: 'حداکثر چهار توالی که API تولید توکنهای بیشتر را متوقف میکند. متن برگردانده شده شامل توالی توقف نخواهد بود.',
+ stop_sequencesPlaceholder: 'توالی را وارد کنید و Tab را فشار دهید',
+ },
+ tone: {
+ Creative: 'خلاقانه',
+ Balanced: 'متعادل',
+ Precise: 'دقیق',
+ Custom: 'سفارشی',
+ },
+ addMoreModel: 'برای افزودن مدلهای بیشتر به تنظیمات بروید',
+ },
+ menus: {
+ status: 'بتا',
+ explore: 'کاوش',
+ apps: 'استودیو',
+ plugins: 'افزونهها',
+ pluginsTips: 'افزونههای شخص ثالث را ادغام کنید یا افزونههای هوش مصنوعی سازگار با ChatGPT ایجاد کنید.',
+ datasets: 'دانش',
+ datasetsTips: 'به زودی: دادههای متنی خود را وارد کنید یا از طریق Webhook دادهها را در زمان واقعی برای بهبود زمینه LLM بنویسید.',
+ newApp: 'برنامه جدید',
+ newDataset: 'ایجاد دانش',
+ tools: 'ابزارها',
+ },
+ userProfile: {
+ settings: 'تنظیمات',
+ emailSupport: 'پشتیبانی ایمیل',
+ workspace: 'فضای کاری',
+ createWorkspace: 'ایجاد فضای کاری',
+ helpCenter: 'راهنما',
+ roadmapAndFeedback: 'بازخورد',
+ community: 'انجمن',
+ about: 'درباره',
+ logout: 'خروج',
+ },
+ settings: {
+ accountGroup: 'حساب کاربری',
+ workplaceGroup: 'فضای کاری',
+ account: 'حساب من',
+ members: 'اعضا',
+ billing: 'صورتحساب',
+ integrations: 'ادغامها',
+ language: 'زبان',
+ provider: 'ارائه دهنده مدل',
+ dataSource: 'منبع داده',
+ plugin: 'افزونهها',
+ apiBasedExtension: 'توسعه مبتنی بر API',
+ },
+ account: {
+ avatar: 'آواتار',
+ name: 'نام',
+ email: 'ایمیل',
+ password: 'رمز عبور',
+ passwordTip: 'اگر نمیخواهید از کدهای ورود موقت استفاده کنید، میتوانید یک رمز عبور دائمی تنظیم کنید',
+ setPassword: 'تنظیم رمز عبور',
+ resetPassword: 'بازنشانی رمز عبور',
+ currentPassword: 'رمز عبور فعلی',
+ newPassword: 'رمز عبور جدید',
+ confirmPassword: 'تأیید رمز عبور',
+ notEqual: 'دو رمز عبور متفاوت هستند.',
+ langGeniusAccount: 'حساب Dify',
+ langGeniusAccountTip: 'حساب Dify شما و دادههای کاربری مرتبط.',
+ editName: 'ویرایش نام',
+ showAppLength: 'نمایش {{length}} برنامه',
+ delete: 'حذف حساب کاربری',
+ deleteTip: 'حذف حساب کاربری شما تمام دادههای شما را به طور دائمی پاک میکند و قابل بازیابی نیست.',
+ deleteConfirmTip: 'برای تأیید، لطفاً موارد زیر را از ایمیل ثبتنام شده خود به این آدرس ارسال کنید ',
+ },
+ members: {
+ team: 'تیم',
+ invite: 'افزودن',
+ name: 'نام',
+ lastActive: 'آخرین فعالیت',
+ role: 'نقشها',
+ pending: 'در انتظار...',
+ owner: 'مالک',
+ admin: 'مدیر',
+ adminTip: 'میتواند برنامهها را بسازد و تنظیمات تیم را مدیریت کند',
+ normal: 'عادی',
+ normalTip: 'فقط میتواند از برنامهها استفاده کند، نمیتواند برنامه بسازد',
+ builder: 'سازنده',
+ builderTip: 'میتواند برنامههای خود را بسازد و ویرایش کند',
+ editor: 'ویرایشگر',
+ editorTip: 'میتواند برنامهها را بسازد و ویرایش کند',
+ datasetOperator: 'مدیر دانش',
+ datasetOperatorTip: 'فقط میتواند پایگاه دانش را مدیریت کند',
+ inviteTeamMember: 'افزودن عضو تیم',
+ inviteTeamMemberTip: 'آنها میتوانند پس از ورود به سیستم، مستقیماً به دادههای تیم شما دسترسی پیدا کنند.',
+ email: 'ایمیل',
+ emailInvalid: 'فرمت ایمیل نامعتبر است',
+ emailPlaceholder: 'لطفاً ایمیلها را وارد کنید',
+ sendInvite: 'ارسال دعوت',
+ invitedAsRole: 'به عنوان کاربر {{role}} دعوت شده',
+ invitationSent: 'دعوتنامه ارسال شد',
+ invitationSentTip: 'دعوتنامه ارسال شد و آنها میتوانند وارد Dify شوند تا به دادههای تیم شما دسترسی پیدا کنند.',
+ invitationLink: 'لینک دعوت',
+ failedinvitationEmails: 'کاربران زیر با موفقیت دعوت نشدند',
+ ok: 'تایید',
+ removeFromTeam: 'حذف از تیم',
+ removeFromTeamTip: 'دسترسی تیم را حذف میکند',
+ setAdmin: 'تنظیم به عنوان مدیر',
+ setMember: 'تنظیم به عنوان عضو عادی',
+ setBuilder: 'تنظیم به عنوان سازنده',
+ setEditor: 'تنظیم به عنوان ویرایشگر',
+ disinvite: 'لغو دعوت',
+ deleteMember: 'حذف عضو',
+ you: '(شما)',
+ },
+ integrations: {
+ connected: 'متصل شده',
+ google: 'گوگل',
+ googleAccount: 'ورود با حساب گوگل',
+ github: 'گیتهاب',
+ githubAccount: 'ورود با حساب گیتهاب',
+ connect: 'اتصال',
+ },
+ language: {
+ displayLanguage: 'زبان نمایش',
+ timezone: 'منطقه زمانی',
+ },
+ provider: {
+ apiKey: 'کلید API',
+ enterYourKey: 'کلید API خود را اینجا وارد کنید',
+ invalidKey: 'کلید API OpenAI نامعتبر است',
+ validatedError: 'اعتبارسنجی ناموفق بود: ',
+ validating: 'در حال اعتبارسنجی کلید...',
+ saveFailed: 'ذخیره کلید API ناموفق بود',
+ apiKeyExceedBill: 'این کلید API سهمیه موجود ندارد، لطفاً بخوانید',
+ addKey: 'افزودن کلید',
+ comingSoon: 'به زودی',
+ editKey: 'ویرایش',
+ invalidApiKey: 'کلید API نامعتبر',
+ azure: {
+ apiBase: 'پایه API',
+ apiBasePlaceholder: 'آدرس پایه API نقطه پایانی Azure OpenAI شما.',
+ apiKey: 'کلید API',
+ apiKeyPlaceholder: 'کلید API خود را اینجا وارد کنید',
+ helpTip: 'آشنایی با سرویس Azure OpenAI',
+ },
+ openaiHosted: {
+ openaiHosted: 'OpenAI میزبانی شده',
+ onTrial: 'در حال آزمایش',
+ exhausted: 'سهمیه تمام شده',
+ desc: 'سرویس میزبانی OpenAI ارائه شده توسط Dify به شما اجازه میدهد از مدلهایی مانند GPT-3.5 استفاده کنید. قبل از اتمام سهمیه آزمایشی خود، باید سایر ارائهدهندگان مدل را تنظیم کنید.',
+ callTimes: 'تعداد فراخوانی',
+ usedUp: 'سهمیه آزمایشی تمام شده است. ارائهدهنده مدل خود را اضافه کنید.',
+ useYourModel: 'در حال حاضر از ارائهدهنده مدل خود استفاده میکنید.',
+ close: 'بستن',
+ },
+ anthropicHosted: {
+ anthropicHosted: 'Anthropic Claude',
+ onTrial: 'در حال آزمایش',
+ exhausted: 'سهمیه تمام شده',
+ desc: 'مدل قدرتمند که در طیف گستردهای از وظایف از گفتگوی پیشرفته و تولید محتوای خلاقانه تا دستورالعملهای دقیق عالی عمل میکند.',
+ callTimes: 'تعداد فراخوانی',
+ usedUp: 'سهمیه آزمایشی تمام شده است. ارائهدهنده مدل خود را اضافه کنید.',
+ useYourModel: 'در حال حاضر از ارائهدهنده مدل خود استفاده میکنید.',
+ close: 'بستن',
+ },
+ anthropic: {
+ using: 'قابلیت تعبیه از این استفاده میکند',
+ enableTip: 'برای فعالسازی مدل Anthropic، ابتدا باید به OpenAI یا سرویس Azure OpenAI متصل شوید.',
+ notEnabled: 'فعال نشده',
+ keyFrom: 'کلید API خود را از Anthropic دریافت کنید',
+ },
+ encrypted: {
+ front: 'کلید API شما با استفاده از فناوری',
+ back: ' رمزگذاری و ذخیره خواهد شد.',
+ },
+ },
+ modelProvider: {
+ notConfigured: 'مدل سیستم هنوز به طور کامل پیکربندی نشده است و برخی از عملکردها ممکن است در دسترس نباشند.',
+ systemModelSettings: 'تنظیمات مدل سیستم',
+ systemModelSettingsLink: 'چرا تنظیم مدل سیستم ضروری است؟',
+ selectModel: 'مدل خود را انتخاب کنید',
+ setupModelFirst: 'لطفاً ابتدا مدل خود را تنظیم کنید',
+ systemReasoningModel: {
+ key: 'مدل استدلال سیستم',
+ tip: 'مدل استنتاج پیشفرض را برای ایجاد برنامهها تنظیم کنید. ویژگیهایی مانند تولید نام گفتگو و پیشنهاد سوال بعدی نیز از مدل استنتاج پیشفرض استفاده خواهند کرد.',
+ },
+ embeddingModel: {
+ key: 'مدل تعبیه',
+ tip: 'مدل پیشفرض را برای پردازش تعبیه اسناد دانش تنظیم کنید. هر دو بازیابی و وارد کردن دانش از این مدل تعبیه برای پردازش برداری استفاده میکنند. تغییر باعث ناسازگاری بُعد برداری بین دانش وارد شده و سوال میشود که منجر به شکست بازیابی میشود. برای جلوگیری از شکست بازیابی، لطفاً این مدل را به دلخواه تغییر ندهید.',
+ required: 'مدل تعبیه الزامی است',
+ },
+ speechToTextModel: {
+ key: 'مدل تبدیل گفتار به متن',
+ tip: 'مدل پیشفرض را برای ورودی گفتار به متن در مکالمه تنظیم کنید.',
+ },
+ ttsModel: {
+ key: 'مدل تبدیل متن به گفتار',
+ tip: 'مدل پیشفرض را برای ورودی متن به گفتار در مکالمه تنظیم کنید.',
+ },
+ rerankModel: {
+ key: 'مدل رتبهبندی مجدد',
+ tip: 'مدل رتبهبندی مجدد، لیست اسناد کاندید را بر اساس تطابق معنایی با پرسش کاربر مرتب میکند و نتایج رتبهبندی معنایی را بهبود میبخشد',
+ },
+ apiKey: 'کلید API',
+ quota: 'سهمیه',
+ searchModel: 'جستجوی مدل',
+ noModelFound: 'هیچ مدلی برای {{model}} یافت نشد',
+ models: 'مدلها',
+ showMoreModelProvider: 'نمایش ارائهدهندگان مدل بیشتر',
+ selector: {
+ tip: 'این مدل حذف شده است. لطفاً یک مدل اضافه کنید یا مدل دیگری را انتخاب کنید.',
+ emptyTip: 'هیچ مدل موجودی وجود ندارد',
+ emptySetting: 'لطفاً به تنظیمات بروید تا پیکربندی کنید',
+ rerankTip: 'لطفاً مدل رتبهبندی مجدد را تنظیم کنید',
+ },
+ card: {
+ quota: 'سهمیه',
+ onTrial: 'در حال آزمایش',
+ paid: 'پرداخت شده',
+ quotaExhausted: 'سهمیه تمام شده',
+ callTimes: 'تعداد فراخوانی',
+ tokens: 'توکنها',
+ buyQuota: 'خرید سهمیه',
+ priorityUse: 'استفاده با اولویت',
+ removeKey: 'حذف کلید API',
+ tip: 'اولویت به سهمیه پرداخت شده داده میشود. سهمیه آزمایشی پس از اتمام سهمیه پرداخت شده استفاده خواهد شد.',
+ },
+ item: {
+ deleteDesc: '{{modelName}} به عنوان مدلهای استدلال سیستم استفاده میشوند. برخی از عملکردها پس از حذف در دسترس نخواهند بود. لطفاً تأیید کنید.',
+ freeQuota: 'سهمیه رایگان',
+ },
+ addApiKey: 'کلید API خود را اضافه کنید',
+ invalidApiKey: 'کلید API نامعتبر',
+ encrypted: {
+ front: 'کلید API شما با استفاده از فناوری',
+ back: ' رمزگذاری و ذخیره خواهد شد.',
+ },
+ freeQuota: {
+ howToEarn: 'چگونه کسب کنیم',
+ },
+ addMoreModelProvider: 'افزودن ارائهدهنده مدل بیشتر',
+ addModel: 'افزودن مدل',
+ modelsNum: '{{num}} مدل',
+ showModels: 'نمایش مدلها',
+ showModelsNum: 'نمایش {{num}} مدل',
+ collapse: 'جمع کردن',
+ config: 'پیکربندی',
+ modelAndParameters: 'مدل و پارامترها',
+ model: 'مدل',
+ featureSupported: '{{feature}} پشتیبانی میشود',
+ callTimes: 'تعداد فراخوانی',
+ credits: 'اعتبار پیام',
+ buyQuota: 'خرید سهمیه',
+ getFreeTokens: 'دریافت توکنهای رایگان',
+ priorityUsing: 'استفاده با اولویت',
+ deprecated: 'منسوخ شده',
+ confirmDelete: 'تأیید حذف؟',
+ quotaTip: 'توکنهای رایگان باقیمانده در دسترس',
+ loadPresets: 'بارگیری تنظیمات از پیش تعیین شده',
+ parameters: 'پارامترها',
+ loadBalancing: 'تعادل بار',
+ loadBalancingDescription: 'کاهش فشار با چندین مجموعه اعتبارنامه.',
+ loadBalancingHeadline: 'تعادل بار',
+ configLoadBalancing: 'پیکربندی تعادل بار',
+ modelHasBeenDeprecated: 'این مدل منسوخ شده است',
+ providerManaged: 'مدیریت شده توسط ارائهدهنده',
+ providerManagedDescription: 'استفاده از مجموعه واحد اعتبارنامه ارائه شده توسط ارائهدهنده مدل.',
+ defaultConfig: 'پیکربندی پیشفرض',
+ apiKeyStatusNormal: 'وضعیت کلید API عادی است',
+ apiKeyRateLimit: 'به محدودیت نرخ رسید، پس از {{seconds}} ثانیه در دسترس خواهد بود',
+ addConfig: 'افزودن پیکربندی',
+ editConfig: 'ویرایش پیکربندی',
+ loadBalancingLeastKeyWarning: 'برای فعال کردن تعادل بار، حداقل 2 کلید باید فعال باشند.',
+ loadBalancingInfo: 'به طور پیشفرض، تعادل بار از استراتژی Round-robin استفاده میکند. اگر محدودیت نرخ فعال شود، یک دوره خنک شدن 1 دقیقهای اعمال خواهد شد.',
+ upgradeForLoadBalancing: 'برای فعال کردن تعادل بار، طرح خود را ارتقا دهید.',
+ },
+ dataSource: {
+ add: 'افزودن منبع داده',
+ connect: 'اتصال',
+ configure: 'پیکربندی',
+ notion: {
+ title: 'نوشن',
+ description: 'استفاده از نوشن به عنوان منبع داده برای دانش.',
+ connectedWorkspace: 'فضای کاری متصل',
+ addWorkspace: 'افزودن فضای کاری',
+ connected: 'متصل شده',
+ disconnected: 'قطع شده',
+ changeAuthorizedPages: 'تغییر صفحات مجاز',
+ pagesAuthorized: 'صفحات مجاز',
+ sync: 'همگامسازی',
+ remove: 'حذف',
+ selector: {
+ pageSelected: 'صفحات انتخاب شده',
+ searchPages: 'جستجوی صفحات...',
+ noSearchResult: 'نتیجه جستجویی یافت نشد',
+ addPages: 'افزودن صفحات',
+ preview: 'پیشنمایش',
+ },
+ },
+ website: {
+ title: 'وبسایت',
+ description: 'وارد کردن محتوا از وبسایتها با استفاده از خزنده وب.',
+ with: 'با',
+ configuredCrawlers: 'خزندههای پیکربندی شده',
+ active: 'فعال',
+ inactive: 'غیرفعال',
+ },
+ },
+ plugin: {
+ serpapi: {
+ apiKey: 'کلید API',
+ apiKeyPlaceholder: 'کلید API خود را وارد کنید',
+ keyFrom: 'کلید SerpAPI خود را از صفحه حساب SerpAPI دریافت کنید',
+ },
+ },
+ apiBasedExtension: {
+ title: 'افزونههای مبتنی بر API مدیریت متمرکز API را فراهم میکنند و پیکربندی را برای استفاده آسان در برنامههای Dify ساده میکنند.',
+ link: 'نحوه توسعه افزونه API خود را بیاموزید.',
+ linkUrl: 'https://docs.dify.ai/features/extension/api_based_extension',
+ add: 'افزودن افزونه API',
+ selector: {
+ title: 'افزونه API',
+ placeholder: 'لطفاً افزونه API را انتخاب کنید',
+ manage: 'مدیریت افزونه API',
+ },
+ modal: {
+ title: 'افزودن افزونه API',
+ editTitle: 'ویرایش افزونه API',
+ name: {
+ title: 'نام',
+ placeholder: 'لطفاً نام را وارد کنید',
+ },
+ apiEndpoint: {
+ title: 'نقطه پایانی API',
+ placeholder: 'لطفاً نقطه پایانی API را وارد کنید',
+ },
+ apiKey: {
+ title: 'کلید API',
+ placeholder: 'لطفاً کلید API را وارد کنید',
+ lengthError: 'طول کلید API نمیتواند کمتر از ۵ کاراکتر باشد',
+ },
+ },
+ type: 'نوع',
+ },
+ about: {
+ changeLog: 'تغییرات',
+ updateNow: 'بهروزرسانی اکنون',
+ nowAvailable: 'Dify {{version}} اکنون در دسترس است.',
+ latestAvailable: 'Dify {{version}} آخرین نسخه در دسترس است.',
+ },
+ appMenus: {
+ overview: 'نظارت',
+ promptEng: 'هماهنگسازی',
+ apiAccess: 'دسترسی API',
+ logAndAnn: 'گزارشها و اعلانات',
+ logs: 'گزارشها',
+ },
+ environment: {
+ testing: 'آزمایشی',
+ development: 'توسعه',
+ },
+ appModes: {
+ completionApp: 'تولیدکننده متن',
+ chatApp: 'برنامه چت',
+ },
+ datasetMenus: {
+ documents: 'اسناد',
+ hitTesting: 'آزمایش بازیابی',
+ settings: 'تنظیمات',
+ emptyTip: 'دانش مرتبط نشده است، لطفاً به برنامه یا افزونه بروید تا ارتباط را کامل کنید.',
+ viewDoc: 'مشاهده مستندات',
+ relatedApp: 'برنامههای مرتبط',
+ },
+ voiceInput: {
+ speaking: 'اکنون صحبت کنید...',
+ converting: 'در حال تبدیل به متن...',
+ notAllow: 'میکروفون مجاز نیست',
+ },
+ modelName: {
+ 'gpt-3.5-turbo': 'جیپیتی-۳.۵-توربو',
+ 'gpt-3.5-turbo-16k': 'جیپیتی-۳.۵-توربو-۱۶کا',
+ 'gpt-4': 'جیپیتی-۴',
+ 'gpt-4-32k': 'جیپیتی-۴-۳۲کا',
+ 'text-davinci-003': 'متن-داوینچی-۰۰۳',
+ 'text-embedding-ada-002': 'متن-تعبیه-آدا-۰۰۲',
+ 'whisper-1': 'ویسپر-۱',
+ 'claude-instant-1': 'کلاود-فوری',
+ 'claude-2': 'کلاود-۲',
+ },
+ chat: {
+ renameConversation: 'تغییر نام مکالمه',
+ conversationName: 'نام مکالمه',
+ conversationNamePlaceholder: 'لطفاً نام مکالمه را وارد کنید',
+ conversationNameCanNotEmpty: 'نام مکالمه الزامی است',
+ citation: {
+ title: 'استنادها',
+ linkToDataset: 'پیوند به دانش',
+ characters: 'کاراکترها:',
+ hitCount: 'تعداد بازیابی:',
+ vectorHash: 'هش بردار:',
+ hitScore: 'امتیاز بازیابی:',
+ },
+ },
+ promptEditor: {
+ placeholder: 'دستور خود را اینجا بنویسید، «{» را وارد کنید تا یک متغیر درج کنید، «/» را وارد کنید تا یک بلوک محتوای دستور درج کنید',
+ context: {
+ item: {
+ title: 'زمینه',
+ desc: 'درج الگوی زمینه',
+ },
+ modal: {
+ title: '{{num}} دانش در زمینه',
+ add: 'افزودن زمینه',
+ footer: 'شما میتوانید زمینهها را در بخش زمینه در زیر مدیریت کنید.',
+ },
+ },
+ history: {
+ item: {
+ title: 'تاریخچه مکالمه',
+ desc: 'درج الگوی پیام تاریخی',
+ },
+ modal: {
+ title: 'مثال',
+ user: 'سلام',
+ assistant: 'سلام! چطور میتوانم امروز به شما کمک کنم؟',
+ edit: 'ویرایش نامهای نقش مکالمه',
+ },
+ },
+ variable: {
+ item: {
+ title: 'متغیرها و ابزارهای خارجی',
+ desc: 'درج متغیرها و ابزارهای خارجی',
+ },
+ outputToolDisabledItem: {
+ title: 'متغیرها',
+ desc: 'درج متغیرها',
+ },
+ modal: {
+ add: 'متغیر جدید',
+ addTool: 'ابزار جدید',
+ },
+ },
+ query: {
+ item: {
+ title: 'پرسوجو',
+ desc: 'درج الگوی پرسوجوی کاربر',
+ },
+ },
+ existed: 'در حال حاضر در دستور وجود دارد',
+ },
+ imageUploader: {
+ uploadFromComputer: 'بارگذاری از کامپیوتر',
+ uploadFromComputerReadError: 'خواندن تصویر ناموفق بود، لطفاً دوباره تلاش کنید.',
+ uploadFromComputerUploadError: 'بارگذاری تصویر ناموفق بود، لطفاً دوباره بارگذاری کنید.',
+ uploadFromComputerLimit: 'بارگذاری تصاویر نمیتواند از {{size}} مگابایت بیشتر باشد',
+ pasteImageLink: 'پیوند تصویر را بچسبانید',
+ pasteImageLinkInputPlaceholder: 'پیوند تصویر را اینجا بچسبانید',
+ pasteImageLinkInvalid: 'پیوند تصویر نامعتبر',
+ imageUpload: 'بارگذاری تصویر',
+ },
+ tag: {
+ placeholder: 'همه برچسبها',
+ addNew: 'افزودن برچسب جدید',
+ noTag: 'بدون برچسب',
+ noTagYet: 'هنوز برچسبی وجود ندارد',
+ addTag: 'افزودن برچسبها',
+ editTag: 'ویرایش برچسبها',
+ manageTags: 'مدیریت برچسبها',
+ selectorPlaceholder: 'برای جستجو یا ایجاد تایپ کنید',
+ create: 'ایجاد',
+ delete: 'حذف برچسب',
+ deleteTip: 'برچسب در حال استفاده است، آیا آن را حذف میکنید؟',
+ created: 'برچسب با موفقیت ایجاد شد',
+ failed: 'ایجاد برچسب ناموفق بود',
+ },
+}
+
+export default translation
diff --git a/web/i18n/fa-IR/custom.ts b/web/i18n/fa-IR/custom.ts
new file mode 100644
index 00000000000000..bcf3f26150a351
--- /dev/null
+++ b/web/i18n/fa-IR/custom.ts
@@ -0,0 +1,30 @@
+const translation = {
+ custom: 'سفارشی سازی',
+ upgradeTip: {
+ prefix: 'طرح خود را ارتقا دهید به',
+ suffix: 'تا برند خود را سفارشی کنید.',
+ },
+ webapp: {
+ title: 'سفارشی سازی برند وب اپ',
+ removeBrand: 'حذف "Powered by Dify"',
+ changeLogo: 'تغییر تصویر برند "Powered by"',
+ changeLogoTip: 'فرمت SVG یا PNG با حداقل اندازه 40x40px',
+ },
+ app: {
+ title: 'سفارشی سازی برند هدر اپلیکیشن',
+ changeLogoTip: 'فرمت SVG یا PNG با حداقل اندازه 80x80px',
+ },
+ upload: 'بارگذاری',
+ uploading: 'در حال بارگذاری',
+ uploadedFail: 'بارگذاری تصویر ناموفق بود، لطفاً دوباره بارگذاری کنید.',
+ change: 'تغییر',
+ apply: 'اعمال',
+ restore: 'بازگرداندن به پیشفرضها',
+ customize: {
+ contactUs: ' با ما تماس بگیرید ',
+ prefix: 'برای سفارشیسازی لوگوی برند در اپلیکیشن، لطفاً',
+ suffix: 'برای ارتقا به نسخه Enterprise.',
+ },
+}
+
+export default translation
diff --git a/web/i18n/fa-IR/dataset-creation.ts b/web/i18n/fa-IR/dataset-creation.ts
new file mode 100644
index 00000000000000..f8483af1407504
--- /dev/null
+++ b/web/i18n/fa-IR/dataset-creation.ts
@@ -0,0 +1,161 @@
+const translation = {
+ steps: {
+ header: {
+ creation: 'ایجاد دانش',
+ update: 'افزودن داده',
+ },
+ one: 'انتخاب منبع داده',
+ two: 'پیشپردازش و پاکسازی متن',
+ three: 'اجرا و پایان',
+ },
+ error: {
+ unavailable: 'این دانش در دسترس نیست',
+ },
+ firecrawl: {
+ configFirecrawl: 'پیکربندی Firecrawl',
+ apiKeyPlaceholder: 'کلید API از firecrawl.dev',
+ getApiKeyLinkText: 'کلید API خود را از firecrawl.dev دریافت کنید',
+ },
+ stepOne: {
+ filePreview: 'پیشنمایش فایل',
+ pagePreview: 'پیشنمایش صفحه',
+ dataSourceType: {
+ file: 'وارد کردن از فایل',
+ notion: 'همگامسازی از Notion',
+ web: 'همگامسازی از وبسایت',
+ },
+ uploader: {
+ title: 'بارگذاری فایل',
+ button: 'کشیدن و رها کردن فایل، یا',
+ browse: 'مرور',
+ tip: 'پشتیبانی از {{supportTypes}}. حداکثر {{size}}MB هر کدام.',
+ validation: {
+ typeError: 'نوع فایل پشتیبانی نمیشود',
+ size: 'فایل خیلی بزرگ است. حداکثر {{size}}MB',
+ count: 'چندین فایل پشتیبانی نمیشود',
+ filesNumber: 'شما به حد مجاز بارگذاری دستهای {{filesNumber}} رسیدهاید.',
+ },
+ cancel: 'لغو',
+ change: 'تغییر',
+ failed: 'بارگذاری ناموفق بود',
+ },
+ notionSyncTitle: 'Notion متصل نیست',
+ notionSyncTip: 'برای همگامسازی با Notion، ابتدا باید اتصال به Notion برقرار شود.',
+ connect: 'رفتن به اتصال',
+ button: 'بعدی',
+ emptyDatasetCreation: 'میخواهم یک دانش خالی ایجاد کنم',
+ modal: {
+ title: 'ایجاد یک دانش خالی',
+ tip: 'یک دانش خالی هیچ سندی نخواهد داشت و شما میتوانید هر زمان اسناد را بارگذاری کنید.',
+ input: 'نام دانش',
+ placeholder: 'لطفاً وارد کنید',
+ nameNotEmpty: 'نام نمیتواند خالی باشد',
+ nameLengthInvaild: 'نام باید بین 1 تا 40 کاراکتر باشد',
+ cancelButton: 'لغو',
+ confirmButton: 'ایجاد',
+ failed: 'ایجاد ناموفق بود',
+ },
+ website: {
+ fireCrawlNotConfigured: 'Firecrawl پیکربندی نشده است',
+ fireCrawlNotConfiguredDescription: 'برای استفاده، Firecrawl را با کلید API پیکربندی کنید.',
+ configure: 'پیکربندی',
+ run: 'اجرا',
+ firecrawlTitle: 'استخراج محتوای وب با Firecrawl',
+ firecrawlDoc: 'مستندات Firecrawl',
+ firecrawlDocLink: 'https://docs.dify.ai/guides/knowledge-base/sync-from-website',
+ options: 'گزینهها',
+ crawlSubPage: 'خزش صفحات فرعی',
+ limit: 'محدودیت',
+ maxDepth: 'حداکثر عمق',
+ excludePaths: 'مسیرهای مستثنی',
+ includeOnlyPaths: 'فقط مسیرهای شامل',
+ extractOnlyMainContent: 'فقط محتوای اصلی را استخراج کنید (بدون هدرها، ناوبریها، پاورقیها و غیره)',
+ exceptionErrorTitle: 'یک استثنا در حین اجرای کار Firecrawl رخ داد:',
+ unknownError: 'خطای ناشناخته',
+ totalPageScraped: 'کل صفحات خراشیده شده:',
+ selectAll: 'انتخاب همه',
+ resetAll: 'بازنشانی همه',
+ scrapTimeInfo: 'در مجموع {{total}} صفحه در {{time}} ثانیه خراشیده شد',
+ preview: 'پیشنمایش',
+ maxDepthTooltip: 'حداکثر عمق برای خزش نسبت به URL وارد شده. عمق 0 فقط صفحه URL وارد شده را خراش میدهد، عمق 1 URL و همه چیز بعد از URL وارد شده + یک / را خراش میدهد، و غیره.',
+ },
+ },
+ stepTwo: {
+ segmentation: 'تنظیمات بخشبندی',
+ auto: 'خودکار',
+ autoDescription: 'به طور خودکار قوانین بخشبندی و پیشپردازش را تنظیم کنید. به کاربران ناآشنا توصیه میشود این گزینه را انتخاب کنند.',
+ custom: 'سفارشی',
+ customDescription: 'قوانین بخشبندی، طول بخشها و قوانین پیشپردازش را سفارشی کنید، و غیره.',
+ separator: 'شناسه بخش',
+ separatorPlaceholder: 'برای مثال، خط جدید (\\\\n) یا جداکننده خاص (مانند "***")',
+ maxLength: 'حداکثر طول بخش',
+ overlap: 'همپوشانی بخش',
+ overlapTip: 'تنظیم همپوشانی بخش میتواند ارتباط معنایی بین آنها را حفظ کند و اثر بازیابی را افزایش دهد. توصیه میشود 10%-25% از حداکثر اندازه بخش تنظیم شود.',
+ overlapCheck: 'همپوشانی بخش نباید بزرگتر از طول حداکثر بخش باشد',
+ rules: 'قوانین پیشپردازش متن',
+ removeExtraSpaces: 'جایگزینی فضاهای متوالی، خطوط جدید و تبها',
+ removeUrlEmails: 'حذف همه URLها و آدرسهای ایمیل',
+ removeStopwords: 'حذف کلمات توقف مانند "a"، "an"، "the"',
+ preview: 'تأیید و پیشنمایش',
+ reset: 'بازنشانی',
+ indexMode: 'حالت شاخص',
+ qualified: 'کیفیت بالا',
+ recommend: 'توصیه شده',
+ qualifiedTip: 'رابط جاسازی سیستم پیشفرض را برای پردازش فراخوانی کنید تا دقت بالاتری هنگام پرسش کاربران فراهم شود.',
+ warning: 'لطفاً ابتدا کلید API ارائهدهنده مدل را تنظیم کنید.',
+ click: 'رفتن به تنظیمات',
+ economical: 'اقتصادی',
+ economicalTip: 'از موتورهای برداری آفلاین، شاخصهای کلیدواژه و غیره استفاده کنید تا دقت را بدون صرف توکنها کاهش دهید',
+ QATitle: 'بخشبندی در قالب پرسش و پاسخ',
+ QATip: 'فعال کردن این گزینه توکنهای بیشتری مصرف خواهد کرد',
+ QALanguage: 'بخشبندی با استفاده از',
+ emstimateCost: 'برآورد',
+ emstimateSegment: 'بخشهای برآورد شده',
+ segmentCount: 'بخشها',
+ calculating: 'در حال محاسبه...',
+ fileSource: 'پیشپردازش اسناد',
+ notionSource: 'پیشپردازش صفحات',
+ websiteSource: 'پیشپردازش وبسایت',
+ other: 'و سایر',
+ fileUnit: ' فایلها',
+ notionUnit: ' صفحات',
+ webpageUnit: ' صفحات',
+ previousStep: 'مرحله قبلی',
+ nextStep: 'ذخیره و پردازش',
+ save: 'ذخیره و پردازش',
+ cancel: 'لغو',
+ sideTipTitle: 'چرا بخشبندی و پیشپردازش؟',
+ sideTipP1: 'هنگام پردازش دادههای متنی، بخشبندی و پاکسازی دو مرحله مهم پیشپردازش هستند.',
+ sideTipP2: 'بخشبندی متن طولانی را به پاراگرافها تقسیم میکند تا مدلها بهتر بتوانند آن را درک کنند. این کیفیت و ارتباط نتایج مدل را بهبود میبخشد.',
+ sideTipP3: 'پاکسازی کاراکترها و فرمتهای غیرضروری را حذف میکند و دانش را پاکتر و آسانتر برای تجزیه میکند.',
+ sideTipP4: 'بخشبندی و پاکسازی مناسب عملکرد مدل را بهبود میبخشد و نتایج دقیقتر و ارزشمندتری ارائه میدهد.',
+ previewTitle: 'پیشنمایش',
+ previewTitleButton: 'پیشنمایش',
+ previewButton: 'تغییر به قالب پرسش و پاسخ',
+ previewSwitchTipStart: 'پیشنمایش بخش فعلی در قالب متن است، تغییر به پیشنمایش قالب پرسش و پاسخ',
+ previewSwitchTipEnd: ' توکنهای اضافی مصرف خواهد کرد',
+ characters: 'کاراکترها',
+ indexSettedTip: 'برای تغییر روش شاخص، لطفاً به',
+ retrivalSettedTip: 'برای تغییر روش شاخص، لطفاً به',
+ datasetSettingLink: 'تنظیمات دانش بروید.',
+ },
+ stepThree: {
+ creationTitle: ' دانش ایجاد شد',
+ creationContent: 'ما به طور خودکار نام دانش را تعیین کردیم، شما میتوانید هر زمان آن را تغییر دهید',
+ label: 'نام دانش',
+ additionTitle: ' سند بارگذاری شد',
+ additionP1: 'سند به دانش بارگذاری شده است',
+ additionP2: '، میتوانید آن را در لیست اسناد دانش پیدا کنید.',
+ stop: 'توقف پردازش',
+ resume: 'ادامه پردازش',
+ navTo: 'رفتن به سند',
+ sideTipTitle: 'بعدی چیست',
+ sideTipContent: 'پس از اتمام فهرستبندی سند، دانش میتواند به عنوان زمینه در برنامه یکپارچه شود، میتوانید تنظیمات زمینه را در صفحه ارکستراسیون درخواست پیدا کنید. همچنین میتوانید آن را به عنوان یک افزونه فهرستبندی مستقل ChatGPT برای انتشار ایجاد کنید.',
+ modelTitle: 'آیا مطمئن هستید که میخواهید جاسازی را متوقف کنید؟',
+ modelContent: 'اگر نیاز به ادامه پردازش بعداً دارید، از جایی که متوقف شدهاید ادامه خواهید داد.',
+ modelButtonConfirm: 'تأیید',
+ modelButtonCancel: 'لغو',
+ },
+}
+
+export default translation
diff --git a/web/i18n/fa-IR/dataset-documents.ts b/web/i18n/fa-IR/dataset-documents.ts
new file mode 100644
index 00000000000000..f136353c7b8870
--- /dev/null
+++ b/web/i18n/fa-IR/dataset-documents.ts
@@ -0,0 +1,351 @@
+const translation = {
+ list: {
+ title: 'اسناد',
+ desc: 'تمامی فایلهای دانش در اینجا نمایش داده میشوند و کل دانش میتواند به ارجاعات Dify متصل شود یا از طریق افزونه چت ایندکس شود.',
+ addFile: 'اضافه کردن فایل',
+ addPages: 'اضافه کردن صفحات',
+ addUrl: 'اضافه کردن URL',
+ table: {
+ header: {
+ fileName: 'نام فایل',
+ words: 'کلمات',
+ hitCount: 'تعداد بازیابی',
+ uploadTime: 'زمان بارگذاری',
+ status: 'وضعیت',
+ action: 'اقدام',
+ },
+ rename: 'تغییر نام',
+ name: 'نام',
+ },
+ action: {
+ uploadFile: 'بارگذاری فایل جدید',
+ settings: 'تنظیمات بخشبندی',
+ addButton: 'اضافه کردن قطعه',
+ add: 'اضافه کردن یک قطعه',
+ batchAdd: 'افزودن گروهی',
+ archive: 'بایگانی',
+ unarchive: 'خارج کردن از بایگانی',
+ delete: 'حذف',
+ enableWarning: 'فایل بایگانی شده نمیتواند فعال شود',
+ sync: 'همگامسازی',
+ },
+ index: {
+ enable: 'فعال کردن',
+ disable: 'غیرفعال کردن',
+ all: 'همه',
+ enableTip: 'فایل میتواند ایندکس شود',
+ disableTip: 'فایل نمیتواند ایندکس شود',
+ },
+ status: {
+ queuing: 'در صف',
+ indexing: 'ایندکسسازی',
+ paused: 'متوقف شده',
+ error: 'خطا',
+ available: 'موجود',
+ enabled: 'فعال شده',
+ disabled: 'غیرفعال شده',
+ archived: 'بایگانی شده',
+ },
+ empty: {
+ title: 'هنوز هیچ سندی وجود ندارد',
+ upload: {
+ tip: 'شما میتوانید فایلها را بارگذاری کنید، از وبسایت همگامسازی کنید، یا از برنامههای وبی مانند Notion، GitHub و غیره.',
+ },
+ sync: {
+ tip: 'Dify بهطور دورهای فایلها را از Notion شما دانلود و پردازش را کامل میکند.',
+ },
+ },
+ delete: {
+ title: 'آیا مطمئن هستید که حذف شود؟',
+ content: 'اگر بعداً نیاز به ادامه پردازش داشتید، از همان جایی که مانده بودید ادامه میدهید',
+ },
+ batchModal: {
+ title: 'افزودن گروهی قطعات',
+ csvUploadTitle: 'فایل CSV خود را اینجا بکشید و رها کنید، یا ',
+ browse: 'مرور کنید',
+ tip: 'فایل CSV باید به ساختار زیر مطابقت داشته باشد:',
+ question: 'سؤال',
+ answer: 'پاسخ',
+ contentTitle: 'محتوای قطعه',
+ content: 'محتوا',
+ template: 'الگو را از اینجا دانلود کنید',
+ cancel: 'لغو',
+ run: 'اجرای گروهی',
+ runError: 'اجرای گروهی ناموفق بود',
+ processing: 'در حال پردازش گروهی',
+ completed: 'واردات کامل شد',
+ error: 'خطای واردات',
+ ok: 'تأیید',
+ },
+ },
+ metadata: {
+ title: 'اطلاعات متا',
+ desc: 'برچسبگذاری متادیتا برای اسناد به هوش مصنوعی اجازه میدهد تا به موقع به آنها دسترسی پیدا کند و منبع ارجاعات را برای کاربران آشکار کند.',
+ dateTimeFormat: 'D MMMM YYYY hh:mm A',
+ docTypeSelectTitle: 'لطفاً یک نوع سند را انتخاب کنید',
+ docTypeChangeTitle: 'تغییر نوع سند',
+ docTypeSelectWarning: 'اگر نوع سند تغییر کند، متادیتای پر شده فعلی دیگر حفظ نخواهد شد',
+ firstMetaAction: 'بزن بریم',
+ placeholder: {
+ add: 'اضافه کردن ',
+ select: 'انتخاب ',
+ },
+ source: {
+ upload_file: 'بارگذاری فایل',
+ notion: 'همگامسازی از Notion',
+ github: 'همگامسازی از Github',
+ },
+ type: {
+ book: 'کتاب',
+ webPage: 'صفحه وب',
+ paper: 'مقاله',
+ socialMediaPost: 'پست شبکههای اجتماعی',
+ personalDocument: 'سند شخصی',
+ businessDocument: 'سند تجاری',
+ IMChat: 'چت IM',
+ wikipediaEntry: 'ورودی ویکیپدیا',
+ notion: 'همگامسازی از Notion',
+ github: 'همگامسازی از Github',
+ technicalParameters: 'پارامترهای فنی',
+ },
+ field: {
+ processRule: {
+ processDoc: 'پردازش سند',
+ segmentRule: 'قانون قطعهبندی',
+ segmentLength: 'طول قطعات',
+ processClean: 'تمیز کردن پردازش متن',
+ },
+ book: {
+ title: 'عنوان',
+ language: 'زبان',
+ author: 'نویسنده',
+ publisher: 'ناشر',
+ publicationDate: 'تاریخ انتشار',
+ ISBN: 'ISBN',
+ category: 'دستهبندی',
+ },
+ webPage: {
+ title: 'عنوان',
+ url: 'URL',
+ language: 'زبان',
+ authorPublisher: 'نویسنده/ناشر',
+ publishDate: 'تاریخ انتشار',
+ topicsKeywords: 'موضوعات/کلیدواژهها',
+ description: 'توضیحات',
+ },
+ paper: {
+ title: 'عنوان',
+ language: 'زبان',
+ author: 'نویسنده',
+ publishDate: 'تاریخ انتشار',
+ journalConferenceName: 'نام ژورنال/کنفرانس',
+ volumeIssuePage: 'جلد/شماره/صفحه',
+ DOI: 'DOI',
+ topicsKeywords: 'موضوعات/کلیدواژهها',
+ abstract: 'چکیده',
+ },
+ socialMediaPost: {
+ platform: 'پلتفرم',
+ authorUsername: 'نویسنده/نام کاربری',
+ publishDate: 'تاریخ انتشار',
+ postURL: 'URL پست',
+ topicsTags: 'موضوعات/برچسبها',
+ },
+ personalDocument: {
+ title: 'عنوان',
+ author: 'نویسنده',
+ creationDate: 'تاریخ ایجاد',
+ lastModifiedDate: 'تاریخ آخرین ویرایش',
+ documentType: 'نوع سند',
+ tagsCategory: 'برچسبها/دستهبندی',
+ },
+ businessDocument: {
+ title: 'عنوان',
+ author: 'نویسنده',
+ creationDate: 'تاریخ ایجاد',
+ lastModifiedDate: 'تاریخ آخرین ویرایش',
+ documentType: 'نوع سند',
+ departmentTeam: 'دپارتمان/تیم',
+ },
+ IMChat: {
+ chatPlatform: 'پلتفرم چت',
+ chatPartiesGroupName: 'طرفین چت/نام گروه',
+ participants: 'شرکتکنندگان',
+ startDate: 'تاریخ شروع',
+ endDate: 'تاریخ پایان',
+ topicsKeywords: 'موضوعات/کلیدواژهها',
+ fileType: 'نوع فایل',
+ },
+ wikipediaEntry: {
+ title: 'عنوان',
+ language: 'زبان',
+ webpageURL: 'URL صفحه وب',
+ editorContributor: 'ویرایشگر/همکار',
+ lastEditDate: 'تاریخ آخرین ویرایش',
+ summaryIntroduction: 'خلاصه/مقدمه',
+ },
+ notion: {
+ title: 'عنوان',
+ language: 'زبان',
+ author: 'نویسنده',
+ createdTime: 'زمان ایجاد',
+ lastModifiedTime: 'زمان آخرین ویرایش',
+ url: 'URL',
+ tag: 'برچسب',
+ description: 'توضیحات',
+ },
+ github: {
+ repoName: 'نام مخزن',
+ repoDesc: 'توضیحات مخزن',
+ repoOwner: 'مالک مخزن',
+ fileName: 'نام فایل',
+ filePath: 'مسیر فایل',
+ programmingLang: 'زبان برنامهنویسی',
+ url: 'URL',
+ license: 'مجوز',
+ lastCommitTime: 'زمان آخرین کامیت',
+ lastCommitAuthor: 'نویسنده آخرین کامیت',
+ },
+ originInfo: {
+ originalFilename: 'نام فایل اصلی',
+ originalFileSize: 'اندازه فایل اصلی',
+ uploadDate: 'تاریخ بارگذاری',
+ lastUpdateDate: 'تاریخ آخرین بروزرسانی',
+ source: 'منبع',
+ },
+ technicalParameters: {
+ segmentSpecification: 'مشخصات قطعات',
+ segmentLength: 'طول قطعات',
+ avgParagraphLength: 'طول متوسط پاراگراف',
+ paragraphs: 'پاراگرافها',
+ hitCount: 'تعداد بازیابی',
+ embeddingTime: 'زمان جاسازی',
+ embeddedSpend: 'هزینه جاسازی',
+ },
+ },
+ languageMap: {
+ zh: 'چینی',
+ en: 'انگلیسی',
+ es: 'اسپانیایی',
+ fr: 'فرانسوی',
+ de: 'آلمانی',
+ ja: 'ژاپنی',
+ ko: 'کرهای',
+ ru: 'روسی',
+ ar: 'عربی',
+ pt: 'پرتغالی',
+ it: 'ایتالیایی',
+ nl: 'هلندی',
+ pl: 'لهستانی',
+ sv: 'سوئدی',
+ tr: 'ترکی',
+ he: 'عبری',
+ hi: 'هندی',
+ da: 'دانمارکی',
+ fi: 'فنلاندی',
+ no: 'نروژی',
+ hu: 'مجاری',
+ el: 'یونانی',
+ cs: 'چکی',
+ th: 'تایلندی',
+ id: 'اندونزیایی',
+ },
+ categoryMap: {
+ book: {
+ fiction: 'داستان',
+ biography: 'زندگینامه',
+ history: 'تاریخ',
+ science: 'علم',
+ technology: 'فناوری',
+ education: 'آموزش',
+ philosophy: 'فلسفه',
+ religion: 'دین',
+ socialSciences: 'علوم اجتماعی',
+ art: 'هنر',
+ travel: 'سفر',
+ health: 'سلامت',
+ selfHelp: 'خودیاری',
+ businessEconomics: 'اقتصاد کسبوکار',
+ cooking: 'آشپزی',
+ childrenYoungAdults: 'کودکان و نوجوانان',
+ comicsGraphicNovels: 'کمیکها و رمانهای گرافیکی',
+ poetry: 'شعر',
+ drama: 'نمایشنامه',
+ other: 'دیگر',
+ },
+ personalDoc: {
+ notes: 'یادداشتها',
+ blogDraft: 'پیشنویس وبلاگ',
+ diary: 'دفتر خاطرات',
+ researchReport: 'گزارش پژوهش',
+ bookExcerpt: 'گزیده کتاب',
+ schedule: 'برنامهریزی',
+ list: 'فهرست',
+ projectOverview: 'نمای کلی پروژه',
+ photoCollection: 'مجموعه عکس',
+ creativeWriting: 'نوشته خلاقانه',
+ codeSnippet: 'قطعه کد',
+ designDraft: 'پیشنویس طراحی',
+ personalResume: 'رزومه شخصی',
+ other: 'دیگر',
+ },
+ businessDoc: {
+ meetingMinutes: 'صورتجلسه',
+ researchReport: 'گزارش پژوهش',
+ proposal: 'پیشنهاد',
+ employeeHandbook: 'راهنمای کارمند',
+ trainingMaterials: 'مواد آموزشی',
+ requirementsDocument: 'سند نیازمندیها',
+ designDocument: 'سند طراحی',
+ productSpecification: 'مشخصات محصول',
+ financialReport: 'گزارش مالی',
+ marketAnalysis: 'تحلیل بازار',
+ projectPlan: 'طرح پروژه',
+ teamStructure: 'ساختار تیم',
+ policiesProcedures: 'سیاستها و رویهها',
+ contractsAgreements: 'قراردادها و توافقنامهها',
+ emailCorrespondence: 'مکاتبات ایمیلی',
+ other: 'دیگر',
+ },
+ },
+ },
+ embedding: {
+ processing: 'در حال پردازش جاسازی...',
+ paused: 'جاسازی متوقف شده',
+ completed: 'جاسازی کامل شد',
+ error: 'خطای جاسازی',
+ docName: 'پیشپردازش سند',
+ mode: 'قانون بخشبندی',
+ segmentLength: 'طول قطعات',
+ textCleaning: 'پیشتعریف و تمیز کردن متن',
+ segments: 'پاراگرافها',
+ highQuality: 'حالت با کیفیت بالا',
+ economy: 'حالت اقتصادی',
+ estimate: 'مصرف تخمینی',
+ stop: 'توقف پردازش',
+ resume: 'ادامه پردازش',
+ automatic: 'خودکار',
+ custom: 'سفارشی',
+ previewTip: 'پیشنمایش پاراگراف پس از اتمام جاسازی در دسترس خواهد بود',
+ },
+ segment: {
+ paragraphs: 'پاراگرافها',
+ keywords: 'کلیدواژهها',
+ addKeyWord: 'اضافه کردن کلیدواژه',
+ keywordError: 'حداکثر طول کلیدواژه ۲۰ کاراکتر است',
+ characters: 'کاراکترها',
+ hitCount: 'تعداد بازیابی',
+ vectorHash: 'هش برداری: ',
+ questionPlaceholder: 'سؤال را اینجا اضافه کنید',
+ questionEmpty: 'سؤال نمیتواند خالی باشد',
+ answerPlaceholder: 'پاسخ را اینجا اضافه کنید',
+ answerEmpty: 'پاسخ نمیتواند خالی باشد',
+ contentPlaceholder: 'محتوا را اینجا اضافه کنید',
+ contentEmpty: 'محتوا نمیتواند خالی باشد',
+ newTextSegment: 'قطعه متن جدید',
+ newQaSegment: 'قطعه پرسش و پاسخ جدید',
+ delete: 'حذف این قطعه؟',
+ },
+}
+
+export default translation
diff --git a/web/i18n/fa-IR/dataset-hit-testing.ts b/web/i18n/fa-IR/dataset-hit-testing.ts
new file mode 100644
index 00000000000000..75b97c7abc0eca
--- /dev/null
+++ b/web/i18n/fa-IR/dataset-hit-testing.ts
@@ -0,0 +1,28 @@
+const translation = { // fa-IR strings for the knowledge retrieval ("hit") testing page
+ title: 'آزمون بازیابی',
+ desc: 'آزمون اثرگذاری دانش بر اساس متن پرسش داده شده.',
+ dateTimeFormat: 'MM/DD/YYYY hh:mm A',
+ recents: 'اخیرها',
+ table: {
+ header: {
+ source: 'منبع',
+ text: 'متن',
+ time: 'زمان',
+ },
+ },
+ input: {
+ title: 'متن منبع',
+ placeholder: 'لطفاً یک متن وارد کنید، یک جمله کوتاه خبری توصیه میشود.',
+ countWarning: 'تا ۲۰۰ کاراکتر.',
+ indexWarning: 'فقط دانش با کیفیت بالا.',
+ testing: 'در حال آزمون',
+ },
+ hit: {
+ title: 'پاراگرافهای بازیابی',
+ emptyTip: 'نتایج آزمون بازیابی اینجا نمایش داده میشوند',
+ },
+ noRecentTip: 'اینجا نتیجه پرسش اخیر وجود ندارد',
+ viewChart: 'مشاهده نمودار بُرداری',
+}
+
+export default translation
diff --git a/web/i18n/fa-IR/dataset-settings.ts b/web/i18n/fa-IR/dataset-settings.ts
new file mode 100644
index 00000000000000..a9c9bd8110e929
--- /dev/null
+++ b/web/i18n/fa-IR/dataset-settings.ts
@@ -0,0 +1,35 @@
+const translation = { // fa-IR strings for the knowledge (dataset) settings form
+ title: 'تنظیمات دانش',
+ desc: 'اینجا میتوانید ویژگیها و روشهای کاری دانش را تغییر دهید.',
+ form: {
+ name: 'نام دانش',
+ namePlaceholder: 'لطفاً نام دانش را وارد کنید',
+ nameError: 'نام نمیتواند خالی باشد',
+ desc: 'توضیحات دانش',
+ descInfo: 'لطفاً یک توضیح متنی واضح بنویسید تا محتوای دانش را مشخص کند. این توضیحات به عنوان مبنایی برای تطبیق هنگام انتخاب از چندین دانش برای استنتاج استفاده خواهد شد.',
+ descPlaceholder: 'توضیح دهید که در این دانش چه چیزی وجود دارد. توضیحات دقیق به هوش مصنوعی اجازه میدهد تا به موقع به محتوای دانش دسترسی پیدا کند. اگر خالی باشد، دیفی از استراتژی پیشفرض استفاده خواهد کرد.',
+ descWrite: 'یاد بگیرید چگونه یک توضیح دانش خوب بنویسید.',
+ permissions: 'مجوزها',
+ permissionsOnlyMe: 'فقط من',
+ permissionsAllMember: 'تمام اعضای تیم',
+ permissionsInvitedMembers: 'برخی از اعضای تیم',
+ me: '(شما)',
+ indexMethod: 'روش نمایهسازی',
+ indexMethodHighQuality: 'کیفیت بالا',
+ indexMethodHighQualityTip: 'مدل تعبیه را برای پردازش فراخوانی کنید تا دقت بالاتری هنگام جستجوی کاربران فراهم شود.',
+ indexMethodEconomy: 'اقتصادی',
+ indexMethodEconomyTip: 'استفاده از موتورهای برداری آفلاین، شاخصهای کلمات کلیدی و غیره برای کاهش دقت بدون صرف توکنها',
+ embeddingModel: 'مدل تعبیه',
+ embeddingModelTip: 'برای تغییر مدل تعبیه، لطفاً به ',
+ embeddingModelTipLink: 'تنظیمات',
+ retrievalSetting: {
+ title: 'تنظیمات بازیابی',
+ learnMore: 'بیشتر بدانید',
+ description: ' درباره روش بازیابی.',
+ longDescription: ' درباره روش بازیابی، میتوانید در هر زمانی در تنظیمات دانش این را تغییر دهید.',
+ },
+ save: 'ذخیره',
+ },
+}
+
+export default translation
diff --git a/web/i18n/fa-IR/dataset.ts b/web/i18n/fa-IR/dataset.ts
new file mode 100644
index 00000000000000..30036dc68f1bb0
--- /dev/null
+++ b/web/i18n/fa-IR/dataset.ts
@@ -0,0 +1,76 @@
+const translation = { // fa-IR strings for the knowledge (dataset) list, retrieval methods and rerank settings
+ knowledge: 'دانش',
+ documentCount: ' سند',
+ wordCount: ' هزار کلمه',
+ appCount: ' برنامههای متصل',
+ createDataset: 'ایجاد دانش',
+ createDatasetIntro: 'دادههای متنی خود را وارد کنید یا از طریق Webhook در زمان واقعی برای بهبود زمینه LLM بنویسید.',
+ deleteDatasetConfirmTitle: 'حذف این دانش؟',
+ deleteDatasetConfirmContent:
+ 'حذف دانش غیر قابل برگشت است. کاربران دیگر نمیتوانند به دانش شما دسترسی پیدا کنند و تمام تنظیمات درخواست و گزارشها به طور دائم حذف خواهند شد.',
+ datasetUsedByApp: 'دانش توسط برخی برنامهها استفاده میشود. برنامهها دیگر نمیتوانند از این دانش استفاده کنند و تمام تنظیمات درخواست و گزارشها به طور دائم حذف خواهند شد.',
+ datasetDeleted: 'دانش حذف شد',
+ datasetDeleteFailed: 'حذف دانش ناموفق بود',
+ didYouKnow: 'آیا میدانستید؟',
+ intro1: 'دانش میتواند در برنامه Dify ',
+ intro2: 'به عنوان یک زمینه',
+ intro3: 'ادغام شود',
+ intro4: 'یا میتواند ',
+ intro5: 'به عنوان یک افزونه مستقل ChatGPT برای انتشار',
+ intro6: 'ایجاد شود',
+ unavailable: 'در دسترس نیست',
+ unavailableTip: 'مدل جاسازی در دسترس نیست، نیاز است مدل جاسازی پیشفرض پیکربندی شود',
+ datasets: 'دانش',
+ datasetsApi: 'دسترسی API',
+ retrieval: {
+ semantic_search: {
+ title: 'جستجوی برداری',
+ description: 'تولید جاسازیهای جستجو و جستجوی بخش متنی که بیشترین شباهت را به نمایش برداری آن دارد.',
+ },
+ full_text_search: {
+ title: 'جستجوی متن کامل',
+ description: 'فهرست کردن تمام اصطلاحات در سند، به کاربران اجازه میدهد هر اصطلاحی را جستجو کنند و بخش متنی مربوط به آن اصطلاحات را بازیابی کنند.',
+ },
+ hybrid_search: {
+ title: 'جستجوی هیبریدی',
+ description: 'جستجوی متن کامل و برداری را همزمان اجرا میکند، دوباره رتبهبندی میکند تا بهترین تطابق برای درخواست کاربر انتخاب شود. کاربران میتوانند وزنها را تنظیم کنند یا به یک مدل دوباره رتبهبندی تنظیم کنند.',
+ recommend: 'توصیه',
+ },
+ invertedIndex: {
+ title: 'فهرست معکوس',
+ description: 'فهرست معکوس یک ساختار برای بازیابی کارآمد است. توسط اصطلاحات سازماندهی شده، هر اصطلاح به اسناد یا صفحات وب حاوی آن اشاره میکند.',
+ },
+ change: 'تغییر',
+ changeRetrievalMethod: 'تغییر روش بازیابی',
+ },
+ docsFailedNotice: 'اسناد نتوانستند فهرستبندی شوند',
+ retry: 'تلاش مجدد',
+ indexingTechnique: {
+ high_quality: 'HQ',
+ economy: 'ECO',
+ },
+ indexingMethod: {
+ semantic_search: 'برداری',
+ full_text_search: 'متن کامل',
+ hybrid_search: 'هیبریدی',
+ invertedIndex: 'معکوس',
+ },
+ mixtureHighQualityAndEconomicTip: 'مدل دوباره رتبهبندی برای ترکیب پایگاههای دانش با کیفیت بالا و اقتصادی لازم است.',
+ inconsistentEmbeddingModelTip: 'مدل دوباره رتبهبندی لازم است اگر مدلهای جاسازی پایگاههای دانش انتخابی ناسازگار باشند.',
+ retrievalSettings: 'تنظیمات بازیابی',
+ rerankSettings: 'تنظیمات دوباره رتبهبندی',
+ weightedScore: {
+ title: 'امتیاز وزنی',
+ description: 'با تنظیم وزنهای اختصاص داده شده، این استراتژی دوباره رتبهبندی تعیین میکند که آیا اولویت با تطابق معنایی یا کلمات کلیدی است.',
+ semanticFirst: 'اولویت معنایی',
+ keywordFirst: 'اولویت کلمه کلیدی',
+ customized: 'سفارشیسازی شده',
+ semantic: 'معنایی',
+ keyword: 'کلمه کلیدی',
+ },
+ nTo1RetrievalLegacy: 'بازیابی N-to-1 از سپتامبر به طور رسمی منسوخ خواهد شد. توصیه میشود از بازیابی چند مسیر جدید استفاده کنید تا نتایج بهتری بدست آورید.',
+ nTo1RetrievalLegacyLink: 'بیشتر بدانید',
+ nTo1RetrievalLegacyLinkText: ' بازیابی N-to-1 از سپتامبر به طور رسمی منسوخ خواهد شد.',
+}
+
+export default translation
diff --git a/web/i18n/fa-IR/explore.ts b/web/i18n/fa-IR/explore.ts
new file mode 100644
index 00000000000000..404a9f2593c27b
--- /dev/null
+++ b/web/i18n/fa-IR/explore.ts
@@ -0,0 +1,41 @@
+const translation = { // fa-IR strings for the Explore (app templates) section
+ title: 'کاوش',
+ sidebar: {
+ discovery: 'کشف',
+ chat: 'چت',
+ workspace: 'فضای کاری',
+ action: {
+ pin: 'سنجاق کردن',
+ unpin: 'برداشتن سنجاق',
+ rename: 'تغییر نام',
+ delete: 'حذف',
+ },
+ delete: {
+ title: 'حذف برنامه',
+ content: 'آیا مطمئن هستید که میخواهید این برنامه را حذف کنید؟',
+ },
+ },
+ apps: {
+ title: 'کاوش برنامهها توسط دیفی',
+ description: 'از این برنامههای قالبی بلافاصله استفاده کنید یا برنامههای خود را بر اساس این قالبها سفارشی کنید.',
+ allCategories: 'پیشنهاد شده',
+ },
+ appCard: {
+ addToWorkspace: 'افزودن به فضای کاری',
+ customize: 'سفارشی کردن',
+ },
+ appCustomize: {
+ title: 'ایجاد برنامه از {{name}}',
+ subTitle: 'آیکون و نام برنامه',
+ nameRequired: 'نام برنامه الزامی است',
+ },
+ category: {
+ Assistant: 'دستیار',
+ Writing: 'نوشتن',
+ Translate: 'ترجمه',
+ Programming: 'برنامهنویسی',
+ HR: 'منابع انسانی',
+ },
+}
+
+export default translation
diff --git a/web/i18n/fa-IR/layout.ts b/web/i18n/fa-IR/layout.ts
new file mode 100644
index 00000000000000..928649474b4dcd
--- /dev/null
+++ b/web/i18n/fa-IR/layout.ts
@@ -0,0 +1,4 @@
+const translation = { // fa-IR layout namespace — intentionally empty placeholder (no overrides defined yet)
+}
+
+export default translation
diff --git a/web/i18n/fa-IR/login.ts b/web/i18n/fa-IR/login.ts
new file mode 100644
index 00000000000000..8912561efed909
--- /dev/null
+++ b/web/i18n/fa-IR/login.ts
@@ -0,0 +1,75 @@
+const translation = { // fa-IR strings for sign-in, setup, invitation and password-reset flows
+ pageTitle: 'هی، بیایید شروع کنیم!👋',
+ welcome: 'به Dify خوش آمدید، لطفا برای ادامه وارد شوید.',
+ email: 'آدرس ایمیل',
+ emailPlaceholder: 'ایمیل شما',
+ password: 'رمز عبور',
+ passwordPlaceholder: 'رمز عبور شما',
+ name: 'نام کاربری',
+ namePlaceholder: 'نام کاربری شما',
+ forget: 'رمز عبور خود را فراموش کردهاید؟',
+ signBtn: 'ورود',
+ sso: 'ادامه با SSO',
+ installBtn: 'راهاندازی',
+ setAdminAccount: 'راهاندازی حساب مدیر',
+ setAdminAccountDesc: 'بیشترین امتیازات برای حساب مدیر، که میتواند برای ایجاد برنامهها و مدیریت ارائهدهندگان LLM و غیره استفاده شود.',
+ createAndSignIn: 'ایجاد و ورود',
+ oneMoreStep: 'یک مرحله دیگر',
+ createSample: 'بر اساس این اطلاعات، ما برای شما یک نمونه برنامه ایجاد خواهیم کرد',
+ invitationCode: 'کد دعوت',
+ invitationCodePlaceholder: 'کد دعوت شما',
+ interfaceLanguage: 'زبان رابط کاربری',
+ timezone: 'منطقه زمانی',
+ go: 'برو به Dify',
+ sendUsMail: 'ایمیل معرفی خود را برای ما ارسال کنید، و ما درخواست دعوت را بررسی خواهیم کرد.',
+ acceptPP: 'من سیاست حفظ حریم خصوصی را خوانده و قبول میکنم',
+ reset: 'لطفاً برای بازنشانی رمز عبور خود دستور زیر را اجرا کنید',
+ withGitHub: 'ادامه با GitHub',
+ withGoogle: 'ادامه با Google',
+ rightTitle: 'پتانسیل کامل LLM را باز کنید',
+ rightDesc: 'به راحتی برنامههای AI با ظاهری جذاب، قابل اجرا و بهبود پذیر ایجاد کنید.',
+ tos: 'شرایط خدمات',
+ pp: 'سیاست حفظ حریم خصوصی',
+ tosDesc: 'با ثبت نام، شما با شرایط ما موافقت میکنید',
+ goToInit: 'اگر حساب را اولیه نکردهاید، لطفاً به صفحه اولیهسازی بروید',
+ donthave: 'ندارید؟',
+ invalidInvitationCode: 'کد دعوت نامعتبر است',
+ accountAlreadyInited: 'حساب قبلاً اولیه شده است',
+ forgotPassword: 'رمز عبور خود را فراموش کردهاید؟',
+ resetLinkSent: 'لینک بازنشانی ارسال شد',
+ sendResetLink: 'ارسال لینک بازنشانی',
+ backToSignIn: 'بازگشت به ورود',
+ forgotPasswordDesc: 'لطفاً آدرس ایمیل خود را وارد کنید تا رمز عبور خود را بازنشانی کنید. ما یک ایمیل با دستورالعملهای بازنشانی برای شما ارسال خواهیم کرد.',
+ checkEmailForResetLink: 'لطفاً ایمیل خود را برای لینک بازنشانی رمز عبور بررسی کنید. اگر در عرض چند دقیقه ظاهر نشد، پوشه اسپم خود را بررسی کنید.',
+ passwordChanged: 'اکنون وارد شوید',
+ changePassword: 'تغییر رمز عبور',
+ changePasswordTip: 'لطفاً یک رمز عبور جدید برای حساب خود وارد کنید',
+ invalidToken: 'توکن نامعتبر یا منقضی شده است',
+ confirmPassword: 'تایید رمز عبور',
+ confirmPasswordPlaceholder: 'رمز عبور جدید خود را تایید کنید',
+ passwordChangedTip: 'رمز عبور شما با موفقیت تغییر یافت',
+ error: {
+ emailEmpty: 'آدرس ایمیل لازم است',
+ emailInValid: 'لطفاً یک آدرس ایمیل معتبر وارد کنید',
+ nameEmpty: 'نام لازم است',
+ passwordEmpty: 'رمز عبور لازم است',
+ passwordLengthInValid: 'رمز عبور باید حداقل ۸ کاراکتر باشد',
+ passwordInvalid: 'رمز عبور باید شامل حروف و اعداد باشد و طول آن بیشتر از ۸ کاراکتر باشد',
+ },
+ license: {
+ tip: 'قبل از شروع Dify Community Edition، GitHub را بخوانید',
+ link: 'مجوز منبع باز',
+ },
+ join: 'عضویت',
+ joinTipStart: 'شما را دعوت میکنیم به',
+ joinTipEnd: 'تیم در Dify',
+ invalid: 'لینک منقضی شده است',
+ explore: 'کاوش Dify',
+ activatedTipStart: 'شما به',
+ activatedTipEnd: 'تیم پیوستهاید',
+ activated: 'اکنون وارد شوید',
+ adminInitPassword: 'رمز عبور اولیه مدیر',
+ validate: 'اعتبارسنجی',
+}
+
+export default translation
diff --git a/web/i18n/fa-IR/register.ts b/web/i18n/fa-IR/register.ts
new file mode 100644
index 00000000000000..928649474b4dcd
--- /dev/null
+++ b/web/i18n/fa-IR/register.ts
@@ -0,0 +1,4 @@
+const translation = { // fa-IR register namespace — intentionally empty placeholder (no overrides defined yet)
+}
+
+export default translation
diff --git a/web/i18n/fa-IR/run-log.ts b/web/i18n/fa-IR/run-log.ts
new file mode 100644
index 00000000000000..4423d4523b7f15
--- /dev/null
+++ b/web/i18n/fa-IR/run-log.ts
@@ -0,0 +1,29 @@
+const translation = { // fa-IR strings for the workflow run-log / tracing panel
+ input: 'ورودی',
+ result: 'نتیجه',
+ detail: 'جزئیات',
+ tracing: 'ردیابی',
+ resultPanel: {
+ status: 'وضعیت',
+ time: 'زمان گذشته',
+ tokens: 'کل توکنها',
+ },
+ meta: {
+ title: 'فراداده',
+ status: 'وضعیت',
+ version: 'نسخه',
+ executor: 'اجراکننده',
+ startTime: 'زمان شروع',
+ time: 'زمان گذشته',
+ tokens: 'کل توکنها',
+ steps: 'گامهای اجرا',
+ },
+ resultEmpty: {
+ title: 'این اجرا فقط خروجی به فرمت JSON دارد،',
+ tipLeft: 'لطفاً به ',
+ link: 'پنل جزئیات',
+ tipRight: ' بروید و آن را مشاهده کنید.',
+ },
+}
+
+export default translation
diff --git a/web/i18n/fa-IR/share-app.ts b/web/i18n/fa-IR/share-app.ts
new file mode 100644
index 00000000000000..b74c893e6e08db
--- /dev/null
+++ b/web/i18n/fa-IR/share-app.ts
@@ -0,0 +1,70 @@
+const translation = { // fa-IR strings for the shared (published) app UI: chat and text-generation views
+ common: {
+ welcome: '',
+ appUnavailable: 'اپ در دسترس نیست',
+ appUnkonwError: 'اپ در دسترس نیست', // NOTE(review): key spelling 'appUnkonwError' presumably mirrors the consumer-side lookup key — do not rename without checking callers
+ },
+ chat: {
+ newChat: 'چت جدید',
+ pinnedTitle: 'پین شده',
+ unpinnedTitle: 'چتها',
+ newChatDefaultName: 'مکالمه جدید',
+ resetChat: 'بازنشانی مکالمه',
+ powerBy: 'قدرتگرفته از',
+ prompt: 'پرامپت', // fix: "prompt" was mistranslated as 'پیشنهاد' (suggestion); 'پرامپت' matches usage elsewhere in this locale
+ privatePromptConfigTitle: 'تنظیمات مکالمه',
+ publicPromptConfigTitle: 'پیشنهاد اولیه',
+ configStatusDes: 'قبل از شروع، میتوانید تنظیمات مکالمه را تغییر دهید',
+ configDisabled: 'تنظیمات جلسه قبلی برای این جلسه استفاده شده است.',
+ startChat: 'شروع چت',
+ privacyPolicyLeft: 'لطفاً ',
+ privacyPolicyMiddle: 'سیاست حریم خصوصی',
+ privacyPolicyRight: ' ارائه شده توسط توسعهدهنده اپ را بخوانید.',
+ deleteConversation: {
+ title: 'حذف مکالمه',
+ content: 'آیا مطمئن هستید که میخواهید این مکالمه را حذف کنید؟',
+ },
+ tryToSolve: 'سعی کنید حل کنید',
+ temporarySystemIssue: 'ببخشید، مشکل موقت سیستمی.',
+ },
+ generation: {
+ tabs: {
+ create: 'یکبار اجرا کن',
+ batch: 'اجرا به صورت گروهی',
+ saved: 'ذخیره شده',
+ },
+ savedNoData: {
+ title: 'شما هنوز نتیجهای ذخیره نکردهاید!',
+ description: 'شروع به تولید محتوا کنید و نتایج ذخیره شده خود را اینجا پیدا کنید.',
+ startCreateContent: 'شروع به تولید محتوا',
+ },
+ title: 'تکمیل هوش مصنوعی',
+ queryTitle: 'محتوای درخواست',
+ completionResult: 'نتیجه تکمیل',
+ queryPlaceholder: 'محتوای درخواست خود را بنویسید...',
+ run: 'اجرا',
+ copy: 'کپی',
+ resultTitle: 'تکمیل هوش مصنوعی',
+ noData: 'هوش مصنوعی آنچه را که میخواهید اینجا به شما میدهد.',
+ csvUploadTitle: 'فایل CSV خود را اینجا بکشید و رها کنید، یا ',
+ browse: 'مرور', // fix: "browse" (file picker) was mistranslated as 'جستجو' (search)
+ csvStructureTitle: 'فایل CSV باید با ساختار زیر مطابقت داشته باشد:',
+ downloadTemplate: 'الگو را اینجا دانلود کنید',
+ field: 'فیلد',
+ batchFailed: {
+ info: '{{num}} اجرای ناموفق',
+ retry: 'تلاش مجدد',
+ outputPlaceholder: 'محتوای خروجی وجود ندارد',
+ },
+ errorMsg: {
+ empty: 'لطفاً محتوا را در فایل بارگذاری شده وارد کنید.',
+ fileStructNotMatch: 'فایل CSV بارگذاری شده با ساختار مطابقت ندارد.',
+ emptyLine: 'ردیف {{rowIndex}} خالی است',
+ invalidLine: 'ردیف {{rowIndex}}: مقدار {{varName}} نمیتواند خالی باشد',
+ moreThanMaxLengthLine: 'ردیف {{rowIndex}}: مقدار {{varName}} نمیتواند بیشتر از {{maxLength}} کاراکتر باشد',
+ atLeastOne: 'لطفاً حداقل یک ردیف در فایل بارگذاری شده وارد کنید.',
+ },
+ },
+}
+
+export default translation
diff --git a/web/i18n/fa-IR/tools.ts b/web/i18n/fa-IR/tools.ts
new file mode 100644
index 00000000000000..002f55d1d4adbf
--- /dev/null
+++ b/web/i18n/fa-IR/tools.ts
@@ -0,0 +1,153 @@
+const translation = { // fa-IR strings for the Tools section (custom/built-in/workflow tools)
+ title: 'ابزارها',
+ createCustomTool: 'ایجاد ابزار سفارشی',
+ customToolTip: 'بیشتر در مورد ابزارهای سفارشی Dify بیاموزید',
+ type: {
+ all: 'همه',
+ builtIn: 'درونساخت', // fix: "built-in" was mistranslated as 'سفارشی شده' (customized), colliding with 'custom' below; 'درونساخت' matches builtIn wording elsewhere in this locale
+ custom: 'سفارشی',
+ workflow: 'جریان کار',
+ },
+ contribute: {
+ line1: 'من علاقهمند به ',
+ line2: 'مشارکت در ابزارهای Dify هستم.',
+ viewGuide: 'مشاهده راهنما',
+ },
+ author: 'توسط',
+ auth: {
+ unauthorized: 'برای مجوز دادن',
+ authorized: 'مجوز داده شده',
+ setup: 'تنظیم مجوز برای استفاده',
+ setupModalTitle: 'تنظیم مجوز',
+ setupModalTitleDescription: 'پس از پیکربندی اعتبارنامهها، همه اعضای موجود در فضای کاری میتوانند از این ابزار هنگام هماهنگی برنامهها استفاده کنند.',
+ },
+ includeToolNum: '{{num}} ابزار شامل شد',
+ addTool: 'افزودن ابزار',
+ addToolModal: {
+ type: 'نوع',
+ category: 'دستهبندی',
+ add: 'افزودن',
+ added: 'افزوده شد',
+ manageInTools: 'مدیریت در ابزارها',
+ emptyTitle: 'هیچ ابزار جریان کاری در دسترس نیست',
+ emptyTip: 'به "جریان کاری -> انتشار به عنوان ابزار" بروید',
+ },
+ createTool: {
+ title: 'ایجاد ابزار سفارشی',
+ editAction: 'پیکربندی',
+ editTitle: 'ویرایش ابزار سفارشی',
+ name: 'نام',
+ toolNamePlaceHolder: 'نام ابزار را وارد کنید',
+ nameForToolCall: 'نام فراخوانی ابزار',
+ nameForToolCallPlaceHolder: 'برای شناسایی ماشین، مانند getCurrentWeather، list_pets',
+ nameForToolCallTip: 'فقط اعداد، حروف و خط زیر پشتیبانی میشود.',
+ description: 'توضیحات',
+ descriptionPlaceholder: 'توضیحات مختصر در مورد هدف ابزار، مثلاً، گرفتن دما برای یک مکان خاص.',
+ schema: 'طرح',
+ schemaPlaceHolder: 'طرح OpenAPI خود را اینجا وارد کنید',
+ viewSchemaSpec: 'مشاهده مشخصات OpenAPI-Swagger',
+ importFromUrl: 'وارد کردن از URL',
+ importFromUrlPlaceHolder: 'https://...',
+ urlError: 'لطفاً یک URL معتبر وارد کنید',
+ examples: 'مثالها',
+ exampleOptions: {
+ json: 'آب و هوا (JSON)',
+ yaml: 'فروشگاه حیوانات خانگی (YAML)',
+ blankTemplate: 'الگوی خالی',
+ },
+ availableTools: {
+ title: 'ابزارهای موجود',
+ name: 'نام',
+ description: 'توضیحات',
+ method: 'روش',
+ path: 'مسیر',
+ action: 'عملیات',
+ test: 'آزمایش',
+ },
+ authMethod: {
+ title: 'روش مجوز',
+ type: 'نوع مجوز',
+ keyTooltip: 'کلید Http Header، میتوانید آن را با "Authorization" ترک کنید اگر نمیدانید چیست یا آن را به یک مقدار سفارشی تنظیم کنید',
+ types: {
+ none: 'هیچ',
+ api_key: 'کلید API',
+ apiKeyPlaceholder: 'نام هدر HTTP برای کلید API',
+ apiValuePlaceholder: 'کلید API را وارد کنید',
+ },
+ key: 'کلید',
+ value: 'مقدار',
+ },
+ authHeaderPrefix: {
+ title: 'نوع مجوز',
+ types: {
+ basic: 'پایه',
+ bearer: 'Bearer',
+ custom: 'سفارشی',
+ },
+ },
+ privacyPolicy: 'سیاست حفظ حریم خصوصی',
+ privacyPolicyPlaceholder: 'لطفاً سیاست حفظ حریم خصوصی را وارد کنید',
+ toolInput: {
+ title: 'ورودی ابزار',
+ name: 'نام',
+ required: 'الزامی',
+ method: 'روش',
+ methodSetting: 'تنظیم',
+ methodSettingTip: 'کاربر پیکربندی ابزار را پر میکند',
+ methodParameter: 'پارامتر',
+ methodParameterTip: 'LLM در طول استنباط پر میکند',
+ label: 'برچسبها',
+ labelPlaceholder: 'برچسبها را انتخاب کنید (اختیاری)',
+ description: 'توضیحات',
+ descriptionPlaceholder: 'توضیحات معنی پارامتر',
+ },
+ customDisclaimer: 'سلب مسئولیت سفارشی', // fix: 'توجهیه' is a misspelling/non-word; "disclaimer" is 'سلب مسئولیت'
+ customDisclaimerPlaceholder: 'لطفاً سلب مسئولیت سفارشی را وارد کنید', // fix: same misspelling corrected
+ confirmTitle: 'آیا میخواهید ذخیره کنید؟',
+ confirmTip: 'برنامههایی که از این ابزار استفاده میکنند تحت تأثیر قرار خواهند گرفت',
+ deleteToolConfirmTitle: 'آیا این ابزار را حذف کنید؟',
+ deleteToolConfirmContent: 'حذف ابزار غیرقابل بازگشت است. کاربران دیگر قادر به دسترسی به ابزار شما نخواهند بود.',
+ },
+ test: {
+ title: 'آزمایش',
+ parametersValue: 'پارامترها و مقدار',
+ parameters: 'پارامترها',
+ value: 'مقدار',
+ testResult: 'نتایج آزمایش',
+ testResultPlaceholder: 'نتیجه آزمایش در اینجا نمایش داده میشود',
+ },
+ thought: {
+ using: 'در حال استفاده',
+ used: 'استفاده شده',
+ requestTitle: 'درخواست به',
+ responseTitle: 'پاسخ از',
+ },
+ setBuiltInTools: {
+ info: 'اطلاعات',
+ setting: 'تنظیمات',
+ toolDescription: 'توضیحات ابزار',
+ parameters: 'پارامترها',
+ string: 'رشته',
+ number: 'عدد',
+ required: 'الزامی',
+ infoAndSetting: 'اطلاعات و تنظیمات',
+ },
+ noCustomTool: {
+ title: 'ابزار سفارشی وجود ندارد!',
+ content: 'ابزارهای سفارشی خود را در اینجا اضافه و مدیریت کنید تا برنامههای هوش مصنوعی بسازید.',
+ createTool: 'ایجاد ابزار',
+ },
+ noSearchRes: {
+ title: 'متأسفیم، نتیجهای پیدا نشد!',
+ content: 'ما نتوانستیم ابزارهایی که با جستجوی شما مطابقت داشته باشد پیدا کنیم.',
+ reset: 'بازنشانی جستجو',
+ },
+ builtInPromptTitle: 'پرامپت',
+ toolRemoved: 'ابزار حذف شد',
+ notAuthorized: 'ابزار مجوز ندارد',
+ howToGet: 'چگونه دریافت کنید',
+ openInStudio: 'باز کردن در استودیو',
+ toolNameUsageTip: 'نام فراخوانی ابزار برای استدلال و پرامپتهای عامل',
+}
+
+export default translation
diff --git a/web/i18n/fa-IR/workflow.ts b/web/i18n/fa-IR/workflow.ts
new file mode 100644
index 00000000000000..72f2c12141e02b
--- /dev/null
+++ b/web/i18n/fa-IR/workflow.ts
@@ -0,0 +1,498 @@
+const translation = {
+ common: {
+ undo: 'بازگشت',
+ redo: 'پیشرفت',
+ editing: 'ویرایش',
+ autoSaved: 'ذخیره خودکار',
+ unpublished: 'منتشر نشده',
+ published: 'منتشر شده',
+ publish: 'انتشار',
+ update: 'بهروزرسانی',
+ run: 'اجرا',
+ running: 'در حال اجرا',
+ inRunMode: 'در حالت اجرا',
+ inPreview: 'در پیشنمایش',
+ inPreviewMode: 'در حالت پیشنمایش',
+ preview: 'پیشنمایش',
+ viewRunHistory: 'مشاهده تاریخچه اجرا',
+ runHistory: 'تاریخچه اجرا',
+ goBackToEdit: 'بازگشت به ویرایشگر',
+ conversationLog: 'گزارش مکالمات',
+ features: 'ویژگیها',
+ debugAndPreview: 'پیشنمایش',
+ restart: 'راهاندازی مجدد',
+ currentDraft: 'پیشنویس فعلی',
+ currentDraftUnpublished: 'پیشنویس فعلی منتشر نشده',
+ latestPublished: 'آخرین نسخه منتشر شده',
+ publishedAt: 'منتشر شده',
+ restore: 'بازیابی',
+ runApp: 'اجرای اپلیکیشن',
+ batchRunApp: 'اجرای دستهای اپلیکیشن',
+ accessAPIReference: 'دسترسی به مستندات API',
+ embedIntoSite: 'درج در سایت',
+ addTitle: 'افزودن عنوان...',
+ addDescription: 'افزودن توضیحات...',
+ noVar: 'هیچ متغیری',
+ searchVar: 'جستجوی متغیر',
+ variableNamePlaceholder: 'نام متغیر',
+ setVarValuePlaceholder: 'تنظیم متغیر',
+ needConnecttip: 'این مرحله به هیچ چیزی متصل نیست',
+ maxTreeDepth: 'حداکثر عمق {{depth}} نود در هر شاخه',
+ needEndNode: 'بلوک پایان باید اضافه شود',
+ needAnswerNode: 'بلوک پاسخ باید اضافه شود',
+ workflowProcess: 'فرآیند جریان کار',
+ notRunning: 'هنوز در حال اجرا نیست',
+ previewPlaceholder: 'محتوا را در کادر زیر وارد کنید تا اشکالزدایی چتبات را شروع کنید',
+ effectVarConfirm: {
+ title: 'حذف متغیر',
+ content: 'متغیر در نودهای دیگر استفاده شده است. آیا همچنان میخواهید آن را حذف کنید؟',
+ },
+ insertVarTip: 'برای درج سریع کلید \'/\' را فشار دهید',
+ processData: 'پردازش دادهها',
+ input: 'ورودی',
+ output: 'خروجی',
+ jinjaEditorPlaceholder: 'برای درج متغیر \'/\' یا \'{\' را تایپ کنید',
+ viewOnly: 'فقط مشاهده',
+ showRunHistory: 'نمایش تاریخچه اجرا',
+ enableJinja: 'فعالسازی پشتیبانی از الگوهای Jinja',
+ learnMore: 'اطلاعات بیشتر',
+ copy: 'کپی',
+ duplicate: 'تکرار',
+ addBlock: 'افزودن بلوک',
+ pasteHere: 'چسباندن اینجا',
+ pointerMode: 'حالت اشارهگر',
+ handMode: 'حالت دست',
+ model: 'مدل',
+ workflowAsTool: 'جریان کار به عنوان ابزار',
+ configureRequired: 'پیکربندی مورد نیاز',
+ configure: 'پیکربندی',
+ manageInTools: 'مدیریت در ابزارها',
+ workflowAsToolTip: 'پیکربندی ابزار پس از بهروزرسانی جریان کار مورد نیاز است.',
+ viewDetailInTracingPanel: 'مشاهده جزئیات',
+ syncingData: 'همگامسازی دادهها، فقط چند ثانیه',
+ importDSL: 'وارد کردن DSL',
+ importDSLTip: 'پیشنویس فعلی بر روی هم نوشته خواهد شد. قبل از وارد کردن، جریان کار را به عنوان نسخه پشتیبان صادر کنید.',
+ backupCurrentDraft: 'پشتیبانگیری از پیشنویس فعلی',
+ chooseDSL: 'انتخاب فایل DSL(yml)',
+ overwriteAndImport: 'بازنویسی و وارد کردن',
+ importFailure: 'خطا در وارد کردن',
+ importSuccess: 'وارد کردن موفقیتآمیز',
+ },
+ env: {
+ envPanelTitle: 'متغیرهای محیطی',
+ envDescription: 'متغیرهای محیطی میتوانند برای ذخیره اطلاعات خصوصی و اعتبارنامهها استفاده شوند. آنها فقط خواندنی هستند و میتوانند در حین صادر کردن از فایل DSL جدا شوند.',
+ envPanelButton: 'افزودن متغیر',
+ modal: {
+ title: 'افزودن متغیر محیطی',
+ editTitle: 'ویرایش متغیر محیطی',
+ type: 'نوع',
+ name: 'نام',
+ namePlaceholder: 'نام متغیر',
+ value: 'مقدار',
+ valuePlaceholder: 'مقدار متغیر',
+ secretTip: 'برای تعریف اطلاعات حساس یا دادهها، با تنظیمات DSL برای جلوگیری از نشت پیکربندی شده است.',
+ },
+ export: {
+ title: 'آیا متغیرهای محیطی مخفی را صادر کنید؟',
+ checkbox: 'صادر کردن مقادیر مخفی',
+ ignore: 'صادر کردن DSL',
+ export: 'صادر کردن DSL با مقادیر مخفی',
+ },
+ },
+ changeHistory: {
+ title: 'تاریخچه تغییرات',
+ placeholder: 'هنوز تغییری ایجاد نکردید',
+ clearHistory: 'پاک کردن تاریخچه',
+ hint: 'راهنما',
+ hintText: 'عملیات ویرایش شما در تاریخچه تغییرات پیگیری میشود که برای مدت این جلسه بر روی دستگاه شما ذخیره میشود. این تاریخچه هنگام خروج از ویرایشگر پاک خواهد شد.',
+ stepBackward_one: '{{count}} قدم به عقب',
+ stepBackward_other: '{{count}} قدم به عقب',
+ stepForward_one: '{{count}} قدم به جلو',
+ stepForward_other: '{{count}} قدم به جلو',
+ sessionStart: 'شروع جلسه',
+ currentState: 'وضعیت کنونی',
+ nodeTitleChange: 'عنوان بلوک تغییر کرده است',
+ nodeDescriptionChange: 'توضیحات بلوک تغییر کرده است',
+ nodeDragStop: 'بلوک جابجا شده است',
+ nodeChange: 'بلوک تغییر کرده است',
+ nodeConnect: 'بلوک متصل شده است',
+ nodePaste: 'بلوک چسبانده شده است',
+ nodeDelete: 'بلوک حذف شده است',
+ nodeAdd: 'بلوک اضافه شده است',
+ nodeResize: 'اندازه بلوک تغییر کرده است',
+ noteAdd: 'یادداشت اضافه شده است',
+ noteChange: 'یادداشت تغییر کرده است',
+ noteDelete: 'یادداشت حذف شده است',
+ edgeDelete: 'بلوک قطع شده است',
+ },
+ errorMsg: {
+ fieldRequired: '{{field}} الزامی است',
+ authRequired: 'احراز هویت ضروری است',
+ invalidJson: '{{field}} JSON معتبر نیست',
+ fields: {
+ variable: 'نام متغیر',
+ variableValue: 'مقدار متغیر',
+ code: 'کد',
+ model: 'مدل',
+ rerankModel: 'مدل مجدد رتبهبندی',
+ },
+ invalidVariable: 'متغیر نامعتبر',
+ },
+ singleRun: {
+ testRun: 'اجرای آزمایشی',
+ startRun: 'شروع اجرا',
+ running: 'در حال اجرا',
+ testRunIteration: 'تکرار اجرای آزمایشی',
+ back: 'بازگشت',
+ iteration: 'تکرار',
+ },
+ tabs: {
+ 'searchBlock': 'جستجوی بلوک',
+ 'blocks': 'بلوکها',
+ 'tools': 'ابزارها',
+ 'allTool': 'همه',
+ 'builtInTool': 'درونساخت',
+ 'customTool': 'سفارشی',
+ 'workflowTool': 'جریان کار',
+ 'question-understand': 'درک سوال',
+ 'logic': 'منطق',
+ 'transform': 'تبدیل',
+ 'utilities': 'ابزارهای کاربردی',
+ 'noResult': 'نتیجهای پیدا نشد',
+ },
+ blocks: {
+ 'start': 'شروع',
+ 'end': 'پایان',
+ 'answer': 'پاسخ',
+ 'llm': 'مدل زبان بزرگ',
+ 'knowledge-retrieval': 'استخراج دانش',
+ 'question-classifier': 'دستهبندی سوالات',
+ 'if-else': 'IF/ELSE',
+ 'code': 'کد',
+ 'template-transform': 'الگو',
+ 'http-request': 'درخواست HTTP',
+ 'variable-assigner': 'تخصیصدهنده متغیر',
+ 'variable-aggregator': 'تجمعدهنده متغیر',
+ 'iteration-start': 'شروع تکرار',
+ 'iteration': 'تکرار',
+ 'parameter-extractor': 'استخراجکننده پارامتر',
+ },
+ blocksAbout: {
+ 'start': 'پارامترهای اولیه برای راهاندازی جریان کار را تعریف کنید',
+ 'end': 'پایان و نوع نتیجه یک جریان کار را تعریف کنید',
+ 'answer': 'محتوای پاسخ مکالمه چت را تعریف کنید',
+ 'llm': 'استفاده از مدلهای زبان بزرگ برای پاسخ به سوالات یا پردازش زبان طبیعی',
+ 'knowledge-retrieval': 'اجازه میدهد تا محتوای متنی مرتبط با سوالات کاربر از دانش استخراج شود',
+ 'question-classifier': 'شرایط دستهبندی سوالات کاربر را تعریف کنید، مدل زبان بزرگ میتواند بر اساس توضیحات دستهبندی، نحوه پیشرفت مکالمه را تعریف کند',
+ 'if-else': 'اجازه میدهد تا جریان کار به دو شاخه بر اساس شرایط if/else تقسیم شود',
+ 'code': 'اجرای یک قطعه کد Python یا NodeJS برای پیادهسازی منطق سفارشی',
+ 'template-transform': 'تبدیل دادهها به رشته با استفاده از سینتاکس الگوهای Jinja',
+ 'http-request': 'اجازه میدهد تا درخواستهای سرور از طریق پروتکل HTTP ارسال شوند',
+ 'variable-assigner': 'تجمع متغیرهای چند شاخهای به یک متغیر واحد برای پیکربندی یکپارچه نودهای پاییندستی.',
+ 'variable-aggregator': 'تجمع متغیرهای چند شاخهای به یک متغیر واحد برای پیکربندی یکپارچه نودهای پاییندستی.',
+ 'iteration': 'اجرای چندین مرحله روی یک شیء لیست تا همه نتایج خروجی داده شوند.',
+ 'parameter-extractor': 'استفاده از مدل زبان بزرگ برای استخراج پارامترهای ساختاری از زبان طبیعی برای فراخوانی ابزارها یا درخواستهای HTTP.',
+ },
+ operator: {
+ zoomIn: 'بزرگنمایی',
+ zoomOut: 'کوچکنمایی',
+ zoomTo50: 'بزرگنمایی به 50%',
+ zoomTo100: 'بزرگنمایی به 100%',
+ zoomToFit: 'تناسب با اندازه',
+ },
+ panel: {
+ userInputField: 'فیلد ورودی کاربر',
+ changeBlock: 'تغییر بلوک',
+ helpLink: 'لینک کمک',
+ about: 'درباره',
+ createdBy: 'ساخته شده توسط',
+ nextStep: 'مرحله بعدی',
+ addNextStep: 'افزودن بلوک بعدی به این جریان کار',
+ selectNextStep: 'انتخاب بلوک بعدی',
+ runThisStep: 'اجرا کردن این مرحله',
+ checklist: 'چکلیست',
+ checklistTip: 'اطمینان حاصل کنید که همه مسائل قبل از انتشار حل شدهاند',
+ checklistResolved: 'تمام مسائل حل شدهاند',
+ organizeBlocks: 'سازماندهی بلوکها',
+ change: 'تغییر',
+ },
+ nodes: {
+ common: {
+ outputVars: 'متغیرهای خروجی',
+ insertVarTip: 'درج متغیر',
+ memory: {
+ memory: 'حافظه',
+ memoryTip: 'تنظیمات حافظه چت',
+ windowSize: 'اندازه پنجره',
+ conversationRoleName: 'نام نقش مکالمه',
+ user: 'پیشوند کاربر',
+ assistant: 'پیشوند دستیار',
+ },
+ memories: {
+ title: 'حافظهها',
+ tip: 'حافظه چت',
+ builtIn: 'درونساخت',
+ },
+ },
+ start: {
+ required: 'الزامی',
+ inputField: 'فیلد ورودی',
+ builtInVar: 'متغیرهای درونساخت',
+ outputVars: {
+ query: 'ورودی کاربر',
+ memories: {
+ des: 'تاریخچه مکالمات',
+ type: 'نوع پیام',
+ content: 'محتوای پیام',
+ },
+ files: 'لیست فایلها',
+ },
+ noVarTip: 'ورودیهایی را که میتوان در جریان کار استفاده کرد، تنظیم کنید',
+ },
+ end: {
+ outputs: 'خروجیها',
+ output: {
+ type: 'نوع خروجی',
+ variable: 'متغیر خروجی',
+ },
+ type: {
+ 'none': 'هیچ',
+ 'plain-text': 'متن ساده',
+ 'structured': 'ساختاری',
+ },
+ },
+ answer: {
+ answer: 'پاسخ',
+ outputVars: 'متغیرهای خروجی',
+ },
+ llm: {
+ model: 'مدل',
+ variables: 'متغیرها',
+ context: 'متن',
+ contextTooltip: 'میتوانید دانش را به عنوان متن وارد کنید',
+ notSetContextInPromptTip: 'برای فعال کردن ویژگی متن، لطفاً متغیر متن را در PROMPT پر کنید.',
+ prompt: 'پرامپت',
+ roleDescription: {
+ system: 'دستورات سطح بالا برای مکالمه را ارائه دهید',
+ user: 'دستورات، پرسشها، یا هر ورودی متنی را به مدل ارائه دهید',
+ assistant: 'پاسخهای مدل بر اساس پیامهای کاربر',
+ },
+ addMessage: 'افزودن پیام',
+ vision: 'بینایی',
+ files: 'فایلها',
+ resolution: {
+ name: 'وضوح',
+ high: 'بالا',
+ low: 'پایین',
+ },
+ outputVars: {
+ output: 'تولید محتوا',
+ usage: 'اطلاعات استفاده از مدل',
+ },
+ singleRun: {
+ variable: 'متغیر',
+ },
+ sysQueryInUser: 'sys.query در پیام کاربر ضروری است',
+ },
+ knowledgeRetrieval: {
+ queryVariable: 'متغیر جستجو',
+ knowledge: 'دانش',
+ outputVars: {
+ output: 'دادههای تقسیمبندی شده بازیابی',
+ content: 'محتوای تقسیمبندی شده',
+ title: 'عنوان تقسیمبندی شده',
+ icon: 'آیکون تقسیمبندی شده',
+ url: 'URL تقسیمبندی شده',
+ metadata: 'سایر متادادهها',
+ },
+ },
+ http: {
+ inputVars: 'متغیرهای ورودی',
+ api: 'API',
+ apiPlaceholder: 'URL را وارد کنید، برای درج متغیر \' / \' را تایپ کنید',
+ notStartWithHttp: 'API باید با http:// یا https:// شروع شود',
+ key: 'کلید',
+ value: 'مقدار',
+ bulkEdit: 'ویرایش دستهای',
+ keyValueEdit: 'ویرایش کلید-مقدار',
+ headers: 'هدرها',
+ params: 'پارامترها',
+ body: 'بدنه',
+ outputVars: {
+ body: 'محتوای پاسخ',
+ statusCode: 'کد وضعیت پاسخ',
+ headers: 'فهرست هدر پاسخ JSON',
+ files: 'لیست فایلها',
+ },
+ authorization: {
+ 'authorization': 'احراز هویت',
+ 'authorizationType': 'نوع احراز هویت',
+ 'no-auth': 'هیچ',
+ 'api-key': 'کلید API',
+ 'auth-type': 'نوع احراز هویت',
+ 'basic': 'پایه',
+ 'bearer': 'دارنده',
+ 'custom': 'سفارشی',
+ 'api-key-title': 'کلید API',
+ 'header': 'هدر',
+ },
+ insertVarPlaceholder: 'برای درج متغیر \'/\' را تایپ کنید',
+ timeout: {
+ title: 'زمانتوقف',
+ connectLabel: 'زمانتوقف اتصال',
+ connectPlaceholder: 'زمانتوقف اتصال را به ثانیه وارد کنید',
+ readLabel: 'زمانتوقف خواندن',
+ readPlaceholder: 'زمانتوقف خواندن را به ثانیه وارد کنید',
+ writeLabel: 'زمانتوقف نوشتن',
+ writePlaceholder: 'زمانتوقف نوشتن را به ثانیه وارد کنید',
+ },
+ },
+ code: {
+ inputVars: 'متغیرهای ورودی',
+ outputVars: 'متغیرهای خروجی',
+ advancedDependencies: 'وابستگیهای پیشرفته',
+ advancedDependenciesTip: 'برخی وابستگیهای پیشبارگذاری شده که زمان بیشتری برای مصرف نیاز دارند یا به طور پیشفرض در اینجا موجود نیستند، اضافه کنید',
+ searchDependencies: 'جستجوی وابستگیها',
+ },
+ templateTransform: {
+ inputVars: 'متغیرهای ورودی',
+ code: 'کد',
+ codeSupportTip: 'فقط Jinja2 را پشتیبانی میکند',
+ outputVars: {
+ output: 'محتوای تبدیلشده',
+ },
+ },
+ ifElse: {
+ if: 'اگر',
+ else: 'در غیر این صورت',
+ elseDescription: 'برای تعریف منطق که باید زمانی که شرط if برآورده نشود، اجرا شود.',
+ and: 'و',
+ or: 'یا',
+ operator: 'عملگر',
+ notSetVariable: 'لطفاً ابتدا متغیر را تنظیم کنید',
+ comparisonOperator: {
+ 'contains': 'شامل',
+ 'not contains': 'شامل نمیشود',
+ 'start with': 'شروع با',
+ 'end with': 'پایان با',
+ 'is': 'است',
+ 'is not': 'نیست',
+ 'empty': 'خالی است',
+ 'not empty': 'خالی نیست',
+ 'null': 'تهی',
+ 'not null': 'تهی نیست',
+ },
+ enterValue: 'مقدار را وارد کنید',
+ addCondition: 'افزودن شرط',
+ conditionNotSetup: 'شرط تنظیم نشده است',
+ selectVariable: 'متغیر را انتخاب کنید...',
+ },
+ variableAssigner: {
+ title: 'تخصیص متغیرها',
+ outputType: 'نوع خروجی',
+ varNotSet: 'متغیر تنظیم نشده است',
+ noVarTip: 'متغیرهایی را که باید اختصاص داده شوند اضافه کنید',
+ type: {
+ string: 'رشته',
+ number: 'عدد',
+ object: 'شیء',
+ array: 'آرایه',
+ },
+ aggregationGroup: 'گروه تجمع',
+ aggregationGroupTip: 'فعال کردن این ویژگی اجازه میدهد تا تجمعکننده متغیرها چندین مجموعه متغیر را تجمیع کند.',
+ addGroup: 'افزودن گروه',
+ outputVars: {
+ varDescribe: '{{groupName}} خروجی',
+ },
+ setAssignVariable: 'تعیین متغیر تخصیص یافته',
+ },
+ tool: {
+ toAuthorize: 'برای مجوز دادن',
+ inputVars: 'متغیرهای ورودی',
+ outputVars: {
+ text: 'محتوای تولید شده توسط ابزار',
+ files: {
+ title: 'فایلهای تولید شده توسط ابزار',
+ type: 'نوع پشتیبانی. در حال حاضر فقط تصاویر پشتیبانی میشود',
+ transfer_method: 'روش انتقال. مقدار آن remote_url یا local_file است',
+ url: 'URL تصویر',
+ upload_file_id: 'شناسه فایل آپلود شده',
+ },
+ json: 'json تولید شده توسط ابزار',
+ },
+ },
+ questionClassifiers: {
+ model: 'مدل',
+ inputVars: 'متغیرهای ورودی',
+ outputVars: {
+ className: 'نام کلاس',
+ },
+ class: 'کلاس',
+ classNamePlaceholder: 'نام کلاس خود را بنویسید',
+ advancedSetting: 'تنظیمات پیشرفته',
+ topicName: 'نام موضوع',
+ topicPlaceholder: 'نام موضوع خود را بنویسید',
+ addClass: 'افزودن کلاس',
+ instruction: 'دستورالعمل',
+ instructionTip: 'دستورالعملهای اضافی را برای کمک به دستهبند سوالات برای درک بهتر نحوه دستهبندی سوالات وارد کنید.',
+ instructionPlaceholder: 'دستورالعمل خود را بنویسید',
+ },
+ parameterExtractor: {
+ inputVar: 'متغیر ورودی',
+ extractParameters: 'استخراج پارامترها',
+ importFromTool: 'وارد کردن از ابزارها',
+ addExtractParameter: 'افزودن پارامتر استخراج شده',
+ addExtractParameterContent: {
+ name: 'نام',
+ namePlaceholder: 'نام پارامتر استخراج شده',
+ type: 'نوع',
+ typePlaceholder: 'نوع پارامتر استخراج شده',
+ description: 'توضیحات',
+ descriptionPlaceholder: 'توضیحات پارامتر استخراج شده',
+ required: 'الزامی',
+ requiredContent: 'الزامی فقط به عنوان مرجع برای استنتاج مدل استفاده میشود و برای اعتبارسنجی اجباری خروجی پارامتر نیست.',
+ },
+ extractParametersNotSet: 'پارامترهای استخراج شده تنظیم نشدهاند',
+ instruction: 'دستورالعمل',
+ instructionTip: 'دستورالعملهای اضافی را برای کمک به استخراجکننده پارامتر برای درک نحوه استخراج پارامترها وارد کنید.',
+ advancedSetting: 'تنظیمات پیشرفته',
+ reasoningMode: 'حالت استدلال',
+ reasoningModeTip: 'میتوانید حالت استدلال مناسب را بر اساس توانایی مدل برای پاسخ به دستورات برای فراخوانی عملکردها یا پیشنهادات انتخاب کنید.',
+ isSuccess: 'موفقیتآمیز است. در صورت موفقیت مقدار 1 و در صورت شکست مقدار 0 است.',
+ errorReason: 'دلیل خطا',
+ },
+ iteration: {
+ deleteTitle: 'حذف نود تکرار؟',
+ deleteDesc: 'حذف نود تکرار باعث حذف تمام نودهای فرزند خواهد شد',
+ input: 'ورودی',
+ output: 'متغیرهای خروجی',
+ iteration_one: '{{count}} تکرار',
+ iteration_other: '{{count}} تکرار',
+ currentIteration: 'تکرار فعلی',
+ },
+ note: {
+ addNote: 'افزودن یادداشت',
+ editor: {
+ placeholder: 'یادداشت خود را بنویسید...',
+ small: 'کوچک',
+ medium: 'متوسط',
+ large: 'بزرگ',
+ bold: 'پررنگ',
+ italic: 'ایتالیک',
+ strikethrough: 'خطخورده',
+ link: 'لینک',
+ openLink: 'باز کردن',
+ unlink: 'حذف لینک',
+ enterUrl: 'URL را وارد کنید...',
+ invalidUrl: 'URL نامعتبر',
+ bulletList: 'فهرست گلولهای',
+ showAuthor: 'نمایش نویسنده',
+ },
+ },
+ },
+ tracing: {
+ stopBy: 'متوقف شده توسط {{user}}',
+ },
+}
+
+export default translation
diff --git a/web/i18n/fr-FR/app.ts b/web/i18n/fr-FR/app.ts
index 8e794715761c4c..d89208c4245086 100644
--- a/web/i18n/fr-FR/app.ts
+++ b/web/i18n/fr-FR/app.ts
@@ -85,6 +85,42 @@ const translation = {
workflow: 'Flux de travail',
completion: 'Terminaison',
},
+ tracing: {
+ title: 'Traçage des performances de l\'application',
+ description: 'Configuration d\'un fournisseur LLMOps tiers et traçage des performances de l\'application.',
+ config: 'Configurer',
+ collapse: 'Réduire',
+ expand: 'Développer',
+ tracing: 'Traçage',
+ disabled: 'Désactivé',
+ disabledTip: 'Veuillez d\'abord configurer le fournisseur',
+ enabled: 'En service',
+ tracingDescription: 'Capturez le contexte complet de l\'exécution de l\'application, y compris les appels LLM, le contexte, les prompts, les requêtes HTTP et plus encore, vers une plateforme de traçage tierce.',
+ configProviderTitle: {
+ configured: 'Configuré',
+ notConfigured: 'Configurez le fournisseur pour activer le traçage',
+ moreProvider: 'Plus de fournisseurs',
+ },
+ langsmith: {
+ title: 'LangSmith',
+ description: 'Une plateforme de développement tout-en-un pour chaque étape du cycle de vie des applications basées sur LLM.',
+ },
+ langfuse: {
+ title: 'Langfuse',
+ description: 'Traces, évaluations, gestion des prompts et métriques pour déboguer et améliorer votre application LLM.',
+ },
+ inUse: 'En utilisation',
+ configProvider: {
+ title: 'Configurer ',
+ placeholder: 'Entrez votre {{key}}',
+ project: 'Projet',
+ publicKey: 'Clé Publique',
+ secretKey: 'Clé Secrète',
+ viewDocsLink: 'Voir la documentation de {{key}}',
+ removeConfirmTitle: 'Supprimer la configuration de {{key}} ?',
+ removeConfirmContent: 'La configuration actuelle est en cours d\'utilisation, sa suppression désactivera la fonction de Traçage.',
+ },
+ },
}
export default translation
diff --git a/web/i18n/fr-FR/common.ts b/web/i18n/fr-FR/common.ts
index bdd94e7a16e803..424d8e4a991d6f 100644
--- a/web/i18n/fr-FR/common.ts
+++ b/web/i18n/fr-FR/common.ts
@@ -12,6 +12,7 @@ const translation = {
cancel: 'Annuler',
clear: 'Effacer',
save: 'Enregistrer',
+ saveAndEnable: 'Enregistrer et Activer',
edit: 'Modifier',
add: 'Ajouter',
added: 'Ajouté',
@@ -408,7 +409,7 @@ const translation = {
latestAvailable: 'Dify {{version}} est la dernière version disponible.',
},
appMenus: {
- overview: 'Aperçu',
+ overview: 'Surveillance',
promptEng: 'Orchestrer',
apiAccess: 'Accès API',
logAndAnn: 'Journaux & Annonces.',
diff --git a/web/i18n/fr-FR/workflow.ts b/web/i18n/fr-FR/workflow.ts
index c4b97870d15a46..6df78331e4c149 100644
--- a/web/i18n/fr-FR/workflow.ts
+++ b/web/i18n/fr-FR/workflow.ts
@@ -19,7 +19,7 @@ const translation = {
goBackToEdit: 'Retour à l\'éditeur',
conversationLog: 'Journal de conversation',
features: 'Fonctionnalités',
- debugAndPreview: 'Déboguer et prévisualiser',
+ debugAndPreview: 'Aperçu',
restart: 'Redémarrer',
currentDraft: 'Brouillon actuel',
currentDraftUnpublished: 'Brouillon actuel non publié',
diff --git a/web/i18n/hi-IN/app.ts b/web/i18n/hi-IN/app.ts
index 9286f74c1c16b2..29b0451b2cfa2a 100644
--- a/web/i18n/hi-IN/app.ts
+++ b/web/i18n/hi-IN/app.ts
@@ -85,6 +85,42 @@ const translation = {
workflow: 'वर्कफ़्लो',
completion: 'समाप्ति',
},
+ tracing: {
+ title: 'एप्लिकेशन प्रदर्शन ट्रेसिंग',
+ description: 'तृतीय-पक्ष LLMOps प्रदाता को कॉन्फ़िगर करना और एप्लिकेशन प्रदर्शन का ट्रेस करना।',
+ config: 'कॉन्फ़िगर करें',
+ collapse: 'संकुचित करें',
+ expand: 'विस्तृत करें',
+ tracing: 'ट्रेसिंग',
+ disabled: 'अक्षम',
+ disabledTip: 'कृपया पहले प्रदाता को कॉन्फ़िगर करें',
+ enabled: 'सेवा में',
+ tracingDescription: 'एप्लिकेशन निष्पादन का पूरा संदर्भ कैप्चर करें, जिसमें LLM कॉल, संदर्भ, प्रॉम्प्ट्स, HTTP अनुरोध और अधिक शामिल हैं, एक तृतीय-पक्ष ट्रेसिंग प्लेटफ़ॉर्म पर।',
+ configProviderTitle: {
+ configured: 'कॉन्फ़िगर किया गया',
+ notConfigured: 'ट्रेसिंग सक्षम करने के लिए प्रदाता कॉन्फ़िगर करें',
+ moreProvider: 'अधिक प्रदाता',
+ },
+ langsmith: {
+ title: 'LangSmith',
+ description: 'LLM-संचालित एप्लिकेशन जीवनचक्र के प्रत्येक चरण के लिए एक ऑल-इन-वन डेवलपर प्लेटफ़ॉर्म।',
+ },
+ langfuse: {
+ title: 'Langfuse',
+ description: 'आपके LLM एप्लिकेशन को डीबग और सुधारने के लिए ट्रेस, मूल्यांकन, प्रॉम्प्ट प्रबंधन और मेट्रिक्स।',
+ },
+ inUse: 'उपयोग में',
+ configProvider: {
+ title: 'कॉन्फ़िगर करें ',
+ placeholder: 'अपना {{key}} दर्ज करें',
+ project: 'प्रोजेक्ट',
+ publicKey: 'सार्वजनिक कुंजी',
+ secretKey: 'गुप्त कुंजी',
+ viewDocsLink: '{{key}} दस्तावेज़ देखें',
+ removeConfirmTitle: '{{key}} कॉन्फ़िगरेशन हटाएं?',
+ removeConfirmContent: 'वर्तमान कॉन्फ़िगरेशन उपयोग में है, इसे हटाने से ट्रेसिंग सुविधा बंद हो जाएगी।',
+ },
+ },
}
export default translation
diff --git a/web/i18n/hi-IN/common.ts b/web/i18n/hi-IN/common.ts
index 6f1385cb3df73e..2be7cddf8f5fb0 100644
--- a/web/i18n/hi-IN/common.ts
+++ b/web/i18n/hi-IN/common.ts
@@ -12,6 +12,7 @@ const translation = {
cancel: 'रद्द करें',
clear: 'साफ करें',
save: 'सहेजें',
+ saveAndEnable: 'सहेजें और सक्षम करें',
edit: 'संपादित करें',
add: 'जोड़ें',
added: 'जोड़ा गया',
@@ -454,7 +455,7 @@ const translation = {
latestAvailable: 'Dify {{version}} नवीनतम उपलब्ध संस्करण है।',
},
appMenus: {
- overview: 'अवलोकन',
+ overview: 'निगरानी',
promptEng: 'समन्वय करें',
apiAccess: 'API एक्सेस',
logAndAnn: 'लॉग्स और घोषणाएँ',
diff --git a/web/i18n/hi-IN/workflow.ts b/web/i18n/hi-IN/workflow.ts
index 740fa09988c7c3..3233d75bfd3fb2 100644
--- a/web/i18n/hi-IN/workflow.ts
+++ b/web/i18n/hi-IN/workflow.ts
@@ -19,7 +19,7 @@ const translation = {
goBackToEdit: 'संपादक पर वापस जाएं',
conversationLog: 'वार्तालाप लॉग',
features: 'विशेषताएं',
- debugAndPreview: 'डीबग और पूर्वावलोकन',
+ debugAndPreview: 'पूर्वावलोकन',
restart: 'पुनः आरंभ करें',
currentDraft: 'वर्तमान ड्राफ्ट',
currentDraftUnpublished: 'वर्तमान ड्राफ्ट अप्रकाशित',
@@ -74,6 +74,27 @@ const translation = {
viewDetailInTracingPanel: 'विवरण देखें',
syncingData: 'डेटा सिंक हो रहा है, बस कुछ सेकंड।',
},
+ env: {
+ envPanelTitle: 'पर्यावरण चर',
+ envDescription: 'पर्यावरण चर का उपयोग निजी जानकारी और क्रेडेंशियल्स को संग्रहित करने के लिए किया जा सकता है। वे केवल पढ़ने योग्य हैं और निर्यात के दौरान DSL फ़ाइल से अलग किए जा सकते हैं।',
+ envPanelButton: 'चर जोड़ें',
+ modal: {
+ title: 'पर्यावरण चर जोड़ें',
+ editTitle: 'पर्यावरण चर संपादित करें',
+ type: 'प्रकार',
+ name: 'नाम',
+ namePlaceholder: 'पर्यावरण नाम',
+ value: 'मान',
+ valuePlaceholder: 'पर्यावरण मान',
+ secretTip: 'संवेदनशील जानकारी या डेटा को परिभाषित करने के लिए उपयोग किया जाता है, DSL सेटिंग्स लीक रोकथाम के लिए कॉन्फ़िगर की गई हैं।',
+ },
+ export: {
+ title: 'गुप्त पर्यावरण चर निर्यात करें?',
+ checkbox: 'गुप्त मान निर्यात करें',
+ ignore: 'DSL निर्यात करें',
+ export: 'गुप्त मानों के साथ DSL निर्यात करें',
+ },
+ },
changeHistory: {
title: 'परिवर्तन इतिहास',
placeholder: 'आपने अभी तक कुछ भी नहीं बदला है',
diff --git a/web/i18n/it-IT/workflow.ts b/web/i18n/it-IT/workflow.ts
index 6c009fdf697271..4ab8a7a11105e0 100644
--- a/web/i18n/it-IT/workflow.ts
+++ b/web/i18n/it-IT/workflow.ts
@@ -19,7 +19,7 @@ const translation = {
goBackToEdit: 'Torna all\'editor',
conversationLog: 'Registro conversazioni',
features: 'Caratteristiche',
- debugAndPreview: 'Debug e Anteprima',
+ debugAndPreview: 'Anteprima',
restart: 'Riavvia',
currentDraft: 'Bozza corrente',
currentDraftUnpublished: 'Bozza corrente non pubblicata',
@@ -82,6 +82,27 @@ const translation = {
importFailure: 'Importazione fallita',
importSuccess: 'Importazione riuscita',
},
+ env: {
+ envPanelTitle: 'Variabili d\'Ambiente',
+ envDescription: 'Le variabili d\'ambiente possono essere utilizzate per memorizzare informazioni private e credenziali. Sono di sola lettura e possono essere separate dal file DSL durante l\'esportazione.',
+ envPanelButton: 'Aggiungi Variabile',
+ modal: {
+ title: 'Aggiungi Variabile d\'Ambiente',
+ editTitle: 'Modifica Variabile d\'Ambiente',
+ type: 'Tipo',
+ name: 'Nome',
+ namePlaceholder: 'nome env',
+ value: 'Valore',
+ valuePlaceholder: 'valore env',
+ secretTip: 'Utilizzato per definire informazioni o dati sensibili, con impostazioni DSL configurate per la prevenzione delle fughe.',
+ },
+ export: {
+ title: 'Esportare variabili d\'ambiente segrete?',
+ checkbox: 'Esporta valori segreti',
+ ignore: 'Esporta DSL',
+ export: 'Esporta DSL con valori segreti',
+ },
+ },
changeHistory: {
title: 'Cronologia Modifiche',
placeholder: 'Non hai ancora modificato nulla',
diff --git a/web/i18n/ja-JP/app.ts b/web/i18n/ja-JP/app.ts
index 91bca235f5f3a5..2ca3109bac9f45 100644
--- a/web/i18n/ja-JP/app.ts
+++ b/web/i18n/ja-JP/app.ts
@@ -94,7 +94,7 @@ const translation = {
title: 'アプリのパフォーマンスの追跡',
description: 'サードパーティのLLMOpsサービスとトレースアプリケーションのパフォーマンス設定を行います。',
config: '設定',
- collapse: 'Collapse',
+ collapse: '折りたたむ',
expand: '展開',
tracing: '追跡',
disabled: '無効しました',
diff --git a/web/i18n/ja-JP/common.ts b/web/i18n/ja-JP/common.ts
index 76d58fa944fbca..f3fb8466f1db14 100644
--- a/web/i18n/ja-JP/common.ts
+++ b/web/i18n/ja-JP/common.ts
@@ -443,7 +443,7 @@ const translation = {
latestAvailable: 'Dify {{version}} が最新バージョンです。',
},
appMenus: {
- overview: '概要',
+ overview: '監視',
promptEng: 'オーケストレート',
apiAccess: 'APIアクセス',
logAndAnn: 'ログ&アナウンス',
diff --git a/web/i18n/ja-JP/dataset-documents.ts b/web/i18n/ja-JP/dataset-documents.ts
index 53af92aa60b51d..654ae0ef6c3236 100644
--- a/web/i18n/ja-JP/dataset-documents.ts
+++ b/web/i18n/ja-JP/dataset-documents.ts
@@ -221,7 +221,7 @@ const translation = {
paragraphs: '段落',
hitCount: '検索回数',
embeddingTime: '埋め込み時間',
- embeddedSpend: '埋め込み時間',
+ embeddedSpend: '埋め込みコスト',
},
},
languageMap: {
diff --git a/web/i18n/ja-JP/dataset.ts b/web/i18n/ja-JP/dataset.ts
index 787b1c1e11948e..2cdc819cd5dadb 100644
--- a/web/i18n/ja-JP/dataset.ts
+++ b/web/i18n/ja-JP/dataset.ts
@@ -60,7 +60,7 @@ const translation = {
rerankSettings: 'Rerank設定',
weightedScore: {
title: 'ウェイト設定',
- description: '割り当てられた重みを調整することで、並べ替え戦略はセマンティックマッチングとキーワードマッチングのどちらを優先するかを決定します。',
+ description: '代入された重みを調整することで、並べ替え戦略はセマンティックマッチングとキーワードマッチングのどちらを優先するかを決定します。',
semanticFirst: 'セマンティック優先',
keywordFirst: 'キーワード優先',
customized: 'カスタマイズ',
diff --git a/web/i18n/ja-JP/workflow.ts b/web/i18n/ja-JP/workflow.ts
index ed0eebb8db7f13..8f506bcb46bd3e 100644
--- a/web/i18n/ja-JP/workflow.ts
+++ b/web/i18n/ja-JP/workflow.ts
@@ -19,7 +19,7 @@ const translation = {
goBackToEdit: '編集に戻る',
conversationLog: '会話ログ',
features: '機能',
- debugAndPreview: 'デバッグとプレビュー',
+ debugAndPreview: 'プレビュー',
restart: '再起動',
currentDraft: '現在の下書き',
currentDraftUnpublished: '現在の下書き(未公開)',
@@ -94,11 +94,38 @@ const translation = {
},
export: {
title: 'シークレット環境変数をエクスポートしますか?',
- checkbox: 'シクレート値をエクスポート',
+ checkbox: 'シークレット値をエクスポート',
ignore: 'DSLをエクスポート',
- export: 'シクレート値を含むDSLをエクスポート',
+ export: 'シークレット値を含むDSLをエクスポート',
},
},
+ chatVariable: {
+ panelTitle: '会話変数',
+ panelDescription: '会話変数は、LLMが記憶すべき対話情報を保存するために使用されます。この情報には、対話の履歴、アップロードされたファイル、ユーザーの好みなどが含まれます。読み書きが可能です。',
+ docLink: '詳しくはドキュメントをご覧ください。',
+ button: '変数を追加',
+ modal: {
+ title: '会話変数を追加',
+ editTitle: '会話変数を編集',
+ name: '名前',
+ namePlaceholder: '変数名',
+ type: 'タイプ',
+ value: 'デフォルト値',
+ valuePlaceholder: 'デフォルト値、設定しない場合は空白にしてください',
+ description: '説明',
+ descriptionPlaceholder: '変数の説明',
+ editInJSON: 'JSONで編集する',
+ oneByOne: '次々に追加する',
+ editInForm: 'フォームで編集',
+ arrayValue: '値',
+ addArrayValue: '値を追加',
+ objectKey: 'キー',
+ objectType: 'タイプ',
+ objectValue: 'デフォルト値',
+ },
+ storedContent: '保存されたコンテンツ',
+ updatedAt: '更新日時',
+ },
changeHistory: {
title: '変更履歴',
placeholder: 'まだ何も変更していません',
@@ -149,6 +176,7 @@ const translation = {
tabs: {
'searchBlock': 'ブロックを検索',
'blocks': 'ブロック',
+ 'searchTool': '検索ツール',
'tools': 'ツール',
'allTool': 'すべて',
'workflowTool': 'ワークフロー',
@@ -171,8 +199,9 @@ const translation = {
'code': 'コード',
'template-transform': 'テンプレート',
'http-request': 'HTTPリクエスト',
- 'variable-assigner': '変数割り当て',
+ 'variable-assigner': '変数代入器',
'variable-aggregator': '変数集約器',
+ 'assigner': '変数代入',
'iteration-start': 'イテレーション開始',
'iteration': 'イテレーション',
'parameter-extractor': 'パラメーター抽出',
@@ -189,6 +218,7 @@ const translation = {
'template-transform': 'Jinjaテンプレート構文を使用してデータを文字列に変換します',
'http-request': 'HTTPプロトコル経由でサーバーリクエストを送信できます',
'variable-assigner': '複数のブランチの変数を1つの変数に集約し、下流のノードに対して統一された設定を行います。',
+ 'assigner': '変数代入ノードは、書き込み可能な変数(例えば、会話変数)に値を割り当てるために使用されます。',
'variable-aggregator': '複数のブランチの変数を1つの変数に集約し、下流のノードに対して統一された設定を行います。',
'iteration': 'リストオブジェクトに対して複数のステップを実行し、すべての結果が出力されるまで繰り返します。',
'parameter-extractor': '自然言語からツールの呼び出しやHTTPリクエストのための構造化されたパラメーターを抽出するためにLLMを使用します。',
@@ -215,6 +245,7 @@ const translation = {
checklistResolved: 'すべての問題が解決されました',
organizeBlocks: 'ブロックを整理',
change: '変更',
+ optional: '(オプション)',
},
nodes: {
common: {
@@ -387,11 +418,11 @@ const translation = {
conditionNotSetup: '条件が設定されていません',
},
variableAssigner: {
- title: '変数を割り当てる',
+ title: '変数を代入する',
outputType: '出力タイプ',
outputVarType: '出力変数のタイプ',
varNotSet: '変数が設定されていません',
- noVarTip: '割り当てる変数を追加してください',
+ noVarTip: '代入された変数を追加してください',
type: {
string: '文字列',
number: '数値',
@@ -404,7 +435,18 @@ const translation = {
outputVars: {
varDescribe: '{{groupName}} 出力',
},
- setAssignVariable: '割り当て変数を設定',
+ setAssignVariable: '代入された変数を設定',
+ },
+ assigner: {
+ 'assignedVariable': '代入された変数',
+ 'writeMode': '書き込みモード',
+ 'writeModeTip': '代入された変数が配列の場合、末尾に追記モードを追加する。',
+ 'over-write': '上書き',
+ 'append': '追記',
+ 'plus': 'プラス',
+ 'clear': 'クリア',
+ 'setVariable': '変数を設定する',
+ 'variable': '変数',
},
tool: {
toAuthorize: '承認するには',
diff --git a/web/i18n/ko-KR/app.ts b/web/i18n/ko-KR/app.ts
index d9fb7b378852d4..0e9a11556a012d 100644
--- a/web/i18n/ko-KR/app.ts
+++ b/web/i18n/ko-KR/app.ts
@@ -81,6 +81,42 @@ const translation = {
workflow: '워크플로우',
completion: '완성',
},
+ tracing: {
+ title: '앱 성능 추적',
+ description: '제3자 LLMOps 제공업체 구성 및 앱 성능 추적.',
+ config: '구성',
+ collapse: '접기',
+ expand: '펼치기',
+ tracing: '추적',
+ disabled: '비활성화됨',
+ disabledTip: '먼저 제공업체를 구성해 주세요',
+ enabled: '서비스 중',
+ tracingDescription: 'LLM 호출, 컨텍스트, 프롬프트, HTTP 요청 등 앱 실행의 전체 컨텍스트를 제3자 추적 플랫폼에 캡처합니다.',
+ configProviderTitle: {
+ configured: '구성됨',
+ notConfigured: '추적을 활성화하려면 제공업체를 구성하세요',
+ moreProvider: '더 많은 제공업체',
+ },
+ langsmith: {
+ title: 'LangSmith',
+ description: 'LLM 기반 애플리케이션 수명 주기의 모든 단계를 위한 올인원 개발자 플랫폼.',
+ },
+ langfuse: {
+ title: 'Langfuse',
+ description: 'LLM 애플리케이션을 디버그하고 개선하기 위한 추적, 평가, 프롬프트 관리 및 메트릭.',
+ },
+ inUse: '사용 중',
+ configProvider: {
+ title: '구성 ',
+ placeholder: '{{key}}를 입력하세요',
+ project: '프로젝트',
+ publicKey: '공개 키',
+ secretKey: '비밀 키',
+ viewDocsLink: '{{key}} 문서 보기',
+ removeConfirmTitle: '{{key}} 구성을 제거하시겠습니까?',
+ removeConfirmContent: '현재 구성이 사용 중입니다. 제거하면 추적 기능이 꺼집니다.',
+ },
+ },
}
export default translation
diff --git a/web/i18n/ko-KR/common.ts b/web/i18n/ko-KR/common.ts
index d517dc9346c70b..9e785100784e3e 100644
--- a/web/i18n/ko-KR/common.ts
+++ b/web/i18n/ko-KR/common.ts
@@ -12,6 +12,7 @@ const translation = {
cancel: '취소',
clear: '지우기',
save: '저장',
+ saveAndEnable: '저장 및 활성화',
edit: '편집',
add: '추가',
added: '추가됨',
@@ -404,7 +405,7 @@ const translation = {
latestAvailable: 'Dify {{version}} 최신 버전입니다.',
},
appMenus: {
- overview: '개요',
+ overview: '모니터링',
promptEng: '오케스트레이트',
apiAccess: 'API 액세스',
logAndAnn: '로그 및 어노테이션',
diff --git a/web/i18n/ko-KR/workflow.ts b/web/i18n/ko-KR/workflow.ts
index 1cb6384c197be6..4ea78c1fce6328 100644
--- a/web/i18n/ko-KR/workflow.ts
+++ b/web/i18n/ko-KR/workflow.ts
@@ -19,7 +19,7 @@ const translation = {
goBackToEdit: '편집기로 돌아가기',
conversationLog: '대화 로그',
features: '기능',
- debugAndPreview: '디버그 및 미리보기',
+ debugAndPreview: '미리보기',
restart: '재시작',
currentDraft: '현재 초안',
currentDraftUnpublished: '현재 초안 미발행',
@@ -70,6 +70,27 @@ const translation = {
workflowAsToolTip: '워크플로우 업데이트 후 도구 재구성이 필요합니다.',
viewDetailInTracingPanel: '세부 정보 보기',
},
+ env: {
+ envPanelTitle: '환경 변수',
+ envDescription: '환경 변수는 개인 정보와 자격 증명을 저장하는 데 사용될 수 있습니다. 이들은 읽기 전용이며 내보내기 중에 DSL 파일과 분리할 수 있습니다.',
+ envPanelButton: '변수 추가',
+ modal: {
+ title: '환경 변수 추가',
+ editTitle: '환경 변수 편집',
+ type: '유형',
+ name: '이름',
+ namePlaceholder: '환경 이름',
+ value: '값',
+ valuePlaceholder: '환경 값',
+ secretTip: '민감한 정보나 데이터를 정의하는 데 사용되며, DSL 설정은 유출 방지를 위해 구성됩니다.',
+ },
+ export: {
+ title: '비밀 환경 변수를 내보내시겠습니까?',
+ checkbox: '비밀 값 내보내기',
+ ignore: 'DSL 내보내기',
+ export: '비밀 값이 포함된 DSL 내보내기',
+ },
+ },
changeHistory: {
title: '변경 기록',
placeholder: '아직 아무 것도 변경하지 않았습니다',
diff --git a/web/i18n/language.ts b/web/i18n/language.ts
index 3320dc8ff1c1c1..e65d34d0ffc0eb 100644
--- a/web/i18n/language.ts
+++ b/web/i18n/language.ts
@@ -23,6 +23,7 @@ export type I18nText = {
'ro-RO': string
'pl-PL': string
'hi-IN': string
+ 'fa-IR': string
}
export const languages = data.languages
@@ -50,6 +51,7 @@ export const NOTICE_I18N = {
uk_UA: 'Важливе повідомлення',
vi_VN: 'Thông báo quan trọng',
it_IT: 'Avviso Importante',
+ fa_IR: 'هشدار مهم',
},
desc: {
en_US:
@@ -76,6 +78,8 @@ export const NOTICE_I18N = {
'Hệ thống của chúng tôi sẽ ngừng hoạt động từ 19:00 đến 24:00 UTC vào ngày 28 tháng 8 để nâng cấp. Nếu có thắc mắc, vui lòng liên hệ với nhóm hỗ trợ của chúng tôi (support@dify.ai). Chúng tôi đánh giá cao sự kiên nhẫn của bạn.',
tr_TR:
'Sistemimiz, 28 Ağustos\'ta 19:00 ile 24:00 UTC saatleri arasında güncelleme nedeniyle kullanılamayacaktır. Sorularınız için lütfen destek ekibimizle iletişime geçin (support@dify.ai). Sabrınız için teşekkür ederiz.',
+ fa_IR:
+ 'سیستم ما از ساعت 19:00 تا 24:00 UTC در تاریخ 28 اوت برای ارتقاء در دسترس نخواهد بود. برای سؤالات، لطفاً با تیم پشتیبانی ما (support@dify.ai) تماس بگیرید. ما برای صبر شما ارزش قائلیم.',
},
href: '#',
}
diff --git a/web/i18n/languages.json b/web/i18n/languages.json
index c2aa7af717f1f1..d819e490897cfd 100644
--- a/web/i18n/languages.json
+++ b/web/i18n/languages.json
@@ -132,6 +132,13 @@
"prompt_name": "Türkçe",
"example": "Selam!",
"supported": "true"
+ },
+ {
+ "value": "fa-IR",
+ "name": "Farsi (Iran)",
+ "prompt_name": "Farsi",
+ "example": "سلام, دیفای!",
+ "supported": true
}
]
}
diff --git a/web/i18n/pl-PL/app.ts b/web/i18n/pl-PL/app.ts
index 1bf6e2f3764943..3a54f3ec3ff797 100644
--- a/web/i18n/pl-PL/app.ts
+++ b/web/i18n/pl-PL/app.ts
@@ -92,6 +92,42 @@ const translation = {
workflow: 'Przepływ pracy',
completion: 'Zakończenie',
},
+ tracing: {
+ title: 'Śledzenie wydajności aplikacji',
+ description: 'Konfiguracja zewnętrznego dostawcy LLMOps i śledzenie wydajności aplikacji.',
+ config: 'Konfiguruj',
+ collapse: 'Zwiń',
+ expand: 'Rozwiń',
+ tracing: 'Śledzenie',
+ disabled: 'Wyłączone',
+ disabledTip: 'Najpierw skonfiguruj dostawcę',
+ enabled: 'W użyciu',
+ tracingDescription: 'Przechwytywanie pełnego kontekstu wykonania aplikacji, w tym wywołań LLM, kontekstu, promptów, żądań HTTP i więcej, do platformy śledzenia stron trzecich.',
+ configProviderTitle: {
+ configured: 'Skonfigurowano',
+ notConfigured: 'Skonfiguruj dostawcę, aby włączyć śledzenie',
+ moreProvider: 'Więcej dostawców',
+ },
+ langsmith: {
+ title: 'LangSmith',
+ description: 'Kompleksowa platforma deweloperska dla każdego etapu cyklu życia aplikacji opartej na LLM.',
+ },
+ langfuse: {
+ title: 'Langfuse',
+ description: 'Śledzenie, oceny, zarządzanie promptami i metryki do debugowania i ulepszania twojej aplikacji LLM.',
+ },
+ inUse: 'W użyciu',
+ configProvider: {
+ title: 'Konfiguruj ',
+ placeholder: 'Wprowadź swój {{key}}',
+ project: 'Projekt',
+ publicKey: 'Klucz publiczny',
+ secretKey: 'Klucz tajny',
+ viewDocsLink: 'Zobacz dokumentację {{key}}',
+ removeConfirmTitle: 'Usunąć konfigurację {{key}}?',
+ removeConfirmContent: 'Obecna konfiguracja jest w użyciu, jej usunięcie wyłączy funkcję Śledzenia.',
+ },
+ },
}
export default translation
diff --git a/web/i18n/pl-PL/common.ts b/web/i18n/pl-PL/common.ts
index d9916e9d387478..39572ce09b2692 100644
--- a/web/i18n/pl-PL/common.ts
+++ b/web/i18n/pl-PL/common.ts
@@ -12,6 +12,7 @@ const translation = {
cancel: 'Anuluj',
clear: 'Wyczyść',
save: 'Zapisz',
+ saveAndEnable: 'Zapisz i Włącz',
edit: 'Edytuj',
add: 'Dodaj',
added: 'Dodano',
@@ -422,7 +423,7 @@ const translation = {
latestAvailable: 'Dify {{version}} jest najnowszą dostępną wersją.',
},
appMenus: {
- overview: 'Przegląd',
+ overview: 'Monitorowanie',
promptEng: 'Orkiestracja',
apiAccess: 'Dostęp API',
logAndAnn: 'Logi i ogł.',
diff --git a/web/i18n/pl-PL/workflow.ts b/web/i18n/pl-PL/workflow.ts
index 6cbe0588bcc8a6..cee5051824683b 100644
--- a/web/i18n/pl-PL/workflow.ts
+++ b/web/i18n/pl-PL/workflow.ts
@@ -19,7 +19,7 @@ const translation = {
goBackToEdit: 'Wróć do edytora',
conversationLog: 'Dziennik rozmów',
features: 'Funkcje',
- debugAndPreview: 'Debugowanie i podgląd',
+ debugAndPreview: 'Podgląd',
restart: 'Uruchom ponownie',
currentDraft: 'Bieżący szkic',
currentDraftUnpublished: 'Bieżący szkic nieopublikowany',
@@ -70,6 +70,27 @@ const translation = {
workflowAsToolTip: 'Wymagana rekonfiguracja narzędzia po aktualizacji przepływu pracy.',
viewDetailInTracingPanel: 'Zobacz szczegóły',
},
+ env: {
+ envPanelTitle: 'Zmienne Środowiskowe',
+ envDescription: 'Zmienne środowiskowe mogą być używane do przechowywania prywatnych informacji i poświadczeń. Są one tylko do odczytu i mogą być oddzielone od pliku DSL podczas eksportu.',
+ envPanelButton: 'Dodaj Zmienną',
+ modal: {
+ title: 'Dodaj Zmienną Środowiskową',
+ editTitle: 'Edytuj Zmienną Środowiskową',
+ type: 'Typ',
+ name: 'Nazwa',
+ namePlaceholder: 'nazwa środowiska',
+ value: 'Wartość',
+ valuePlaceholder: 'wartość środowiska',
+ secretTip: 'Używane do definiowania wrażliwych informacji lub danych, z ustawieniami DSL skonfigurowanymi do zapobiegania wyciekom.',
+ },
+ export: {
+ title: 'Eksportować tajne zmienne środowiskowe?',
+ checkbox: 'Eksportuj tajne wartości',
+ ignore: 'Eksportuj DSL',
+ export: 'Eksportuj DSL z tajnymi wartościami',
+ },
+ },
changeHistory: {
title: 'Historia Zmian',
placeholder: 'Nie dokonano jeszcze żadnych zmian',
diff --git a/web/i18n/pt-BR/app.ts b/web/i18n/pt-BR/app.ts
index e12a2b62fa3558..bd5282e760df8b 100644
--- a/web/i18n/pt-BR/app.ts
+++ b/web/i18n/pt-BR/app.ts
@@ -85,6 +85,42 @@ const translation = {
workflow: 'Fluxo de trabalho',
completion: 'Conclusão',
},
+ tracing: {
+ title: 'Rastreamento de desempenho do aplicativo',
+ description: 'Configurando um provedor LLMOps de terceiros e rastreando o desempenho do aplicativo.',
+ config: 'Configurar',
+ collapse: 'Recolher',
+ expand: 'Expandir',
+ tracing: 'Rastreamento',
+ disabled: 'Desativado',
+ disabledTip: 'Por favor, configure o provedor primeiro',
+ enabled: 'Em serviço',
+ tracingDescription: 'Captura o contexto completo da execução do aplicativo, incluindo chamadas LLM, contexto, prompts, solicitações HTTP e mais, para uma plataforma de rastreamento de terceiros.',
+ configProviderTitle: {
+ configured: 'Configurado',
+ notConfigured: 'Configure o provedor para habilitar o rastreamento',
+ moreProvider: 'Mais provedores',
+ },
+ langsmith: {
+ title: 'LangSmith',
+ description: 'Uma plataforma de desenvolvedor completa para cada etapa do ciclo de vida do aplicativo impulsionado por LLM.',
+ },
+ langfuse: {
+ title: 'Langfuse',
+ description: 'Rastreamentos, avaliações, gerenciamento de prompts e métricas para depurar e melhorar seu aplicativo LLM.',
+ },
+ inUse: 'Em uso',
+ configProvider: {
+ title: 'Configurar ',
+ placeholder: 'Insira sua {{key}}',
+ project: 'Projeto',
+ publicKey: 'Chave Pública',
+ secretKey: 'Chave Secreta',
+ viewDocsLink: 'Ver documentação de {{key}}',
+ removeConfirmTitle: 'Remover configuração de {{key}}?',
+ removeConfirmContent: 'A configuração atual está em uso, removê-la desligará o recurso de Rastreamento.',
+ },
+ },
}
export default translation
diff --git a/web/i18n/pt-BR/common.ts b/web/i18n/pt-BR/common.ts
index b82434d52345f8..1b29d066695cc2 100644
--- a/web/i18n/pt-BR/common.ts
+++ b/web/i18n/pt-BR/common.ts
@@ -12,6 +12,7 @@ const translation = {
cancel: 'Cancelar',
clear: 'Limpar',
save: 'Salvar',
+ saveAndEnable: 'Salvar e Ativar',
edit: 'Editar',
add: 'Adicionar',
added: 'Adicionado',
@@ -408,7 +409,7 @@ const translation = {
latestAvailable: 'Dify {{version}} é a última versão disponível.',
},
appMenus: {
- overview: 'Visão Geral',
+ overview: 'Monitoramento',
promptEng: 'Orquestrar',
apiAccess: 'Acesso à API',
logAndAnn: 'Logs e Anúncios',
diff --git a/web/i18n/pt-BR/workflow.ts b/web/i18n/pt-BR/workflow.ts
index 6579a1aed4c8d4..938bef31c3e1af 100644
--- a/web/i18n/pt-BR/workflow.ts
+++ b/web/i18n/pt-BR/workflow.ts
@@ -19,7 +19,7 @@ const translation = {
goBackToEdit: 'Voltar para o editor',
conversationLog: 'Registro de conversa',
features: 'Recursos',
- debugAndPreview: 'Depurar e visualizar',
+ debugAndPreview: 'Visualizar',
restart: 'Reiniciar',
currentDraft: 'Rascunho atual',
currentDraftUnpublished: 'Rascunho atual não publicado',
@@ -70,6 +70,27 @@ const translation = {
workflowAsToolTip: 'É necessária a reconfiguração da ferramenta após a atualização do fluxo de trabalho.',
viewDetailInTracingPanel: 'Ver detalhes',
},
+ env: {
+ envPanelTitle: 'Variáveis de Ambiente',
+ envDescription: 'Variáveis de ambiente podem ser usadas para armazenar informações privadas e credenciais. Elas são somente leitura e podem ser separadas do arquivo DSL durante a exportação.',
+ envPanelButton: 'Adicionar Variável',
+ modal: {
+ title: 'Adicionar Variável de Ambiente',
+ editTitle: 'Editar Variável de Ambiente',
+ type: 'Tipo',
+ name: 'Nome',
+ namePlaceholder: 'nome da env',
+ value: 'Valor',
+ valuePlaceholder: 'valor da env',
+ secretTip: 'Usado para definir informações ou dados sensíveis, com configurações DSL configuradas para prevenção de vazamentos.',
+ },
+ export: {
+ title: 'Exportar variáveis de ambiente secretas?',
+ checkbox: 'Exportar valores secretos',
+ ignore: 'Exportar DSL',
+ export: 'Exportar DSL com valores secretos',
+ },
+ },
changeHistory: {
title: 'Histórico de alterações',
placeholder: 'Você ainda não alterou nada',
diff --git a/web/i18n/ro-RO/app.ts b/web/i18n/ro-RO/app.ts
index 333678863fb166..01a158f4c71997 100644
--- a/web/i18n/ro-RO/app.ts
+++ b/web/i18n/ro-RO/app.ts
@@ -85,6 +85,42 @@ const translation = {
workflow: 'Flux de lucru',
completion: 'Finalizare',
},
+ tracing: {
+ title: 'Urmărirea performanței aplicației',
+ description: 'Configurarea unui furnizor LLMOps terț și urmărirea performanței aplicației.',
+ config: 'Configurare',
+ collapse: 'Restrânge',
+ expand: 'Extinde',
+ tracing: 'Urmărire',
+ disabled: 'Dezactivat',
+ disabledTip: 'Vă rugăm să configurați mai întâi furnizorul',
+ enabled: 'În serviciu',
+ tracingDescription: 'Captează contextul complet al execuției aplicației, inclusiv apelurile LLM, context, prompt-uri, cereri HTTP și altele, către o platformă de urmărire terță.',
+ configProviderTitle: {
+ configured: 'Configurat',
+ notConfigured: 'Configurați furnizorul pentru a activa urmărirea',
+ moreProvider: 'Mai mulți furnizori',
+ },
+ langsmith: {
+ title: 'LangSmith',
+ description: 'O platformă de dezvoltare all-in-one pentru fiecare etapă a ciclului de viață al aplicației bazate pe LLM.',
+ },
+ langfuse: {
+ title: 'Langfuse',
+ description: 'Urmărire, evaluări, gestionarea prompt-urilor și metrici pentru depanarea și îmbunătățirea aplicației dvs. LLM.',
+ },
+ inUse: 'În utilizare',
+ configProvider: {
+ title: 'Configurare ',
+ placeholder: 'Introduceți {{key}}-ul dvs.',
+ project: 'Proiect',
+ publicKey: 'Cheie publică',
+ secretKey: 'Cheie secretă',
+ viewDocsLink: 'Vizualizați documentația {{key}}',
+ removeConfirmTitle: 'Eliminați configurația {{key}}?',
+ removeConfirmContent: 'Configurația curentă este în uz, eliminarea acesteia va dezactiva funcția de Urmărire.',
+ },
+ },
}
export default translation
diff --git a/web/i18n/ro-RO/common.ts b/web/i18n/ro-RO/common.ts
index d61fc63f5c2e41..e7037f65b8f44d 100644
--- a/web/i18n/ro-RO/common.ts
+++ b/web/i18n/ro-RO/common.ts
@@ -12,6 +12,7 @@ const translation = {
cancel: 'Anulează',
clear: 'Șterge',
save: 'Salvează',
+ saveAndEnable: 'Salvează și Activează',
edit: 'Editează',
add: 'Adaugă',
added: 'Adăugat',
@@ -407,7 +408,7 @@ const translation = {
latestAvailable: 'Dify {{version}} este ultima versiune disponibilă.',
},
appMenus: {
- overview: 'Prezentare generală',
+ overview: 'Monitorizare',
promptEng: 'Orchestrare',
apiAccess: 'Acces API',
logAndAnn: 'Jurnale și Ann.',
diff --git a/web/i18n/ro-RO/workflow.ts b/web/i18n/ro-RO/workflow.ts
index e1f0943179278d..beefdc1132a7ea 100644
--- a/web/i18n/ro-RO/workflow.ts
+++ b/web/i18n/ro-RO/workflow.ts
@@ -19,7 +19,7 @@ const translation = {
goBackToEdit: 'Înapoi la editor',
conversationLog: 'Jurnal conversație',
features: 'Funcționalități',
- debugAndPreview: 'Depanare și previzualizare',
+ debugAndPreview: 'Previzualizare',
restart: 'Repornește',
currentDraft: 'Schimbare curentă',
currentDraftUnpublished: 'Schimbare curentă nepublicată',
@@ -70,6 +70,27 @@ const translation = {
workflowAsToolTip: 'Reconfigurarea instrumentului este necesară după actualizarea fluxului de lucru.',
viewDetailInTracingPanel: 'Vezi detalii',
},
+ env: {
+ envPanelTitle: 'Variabile de Mediu',
+ envDescription: 'Variabilele de mediu pot fi utilizate pentru a stoca informații private și credențiale. Acestea sunt doar pentru citire și pot fi separate de fișierul DSL în timpul exportului.',
+ envPanelButton: 'Adaugă Variabilă',
+ modal: {
+ title: 'Adaugă Variabilă de Mediu',
+ editTitle: 'Editează Variabilă de Mediu',
+ type: 'Tip',
+ name: 'Nume',
+ namePlaceholder: 'nume mediu',
+ value: 'Valoare',
+ valuePlaceholder: 'valoare mediu',
+ secretTip: 'Utilizat pentru a defini informații sau date sensibile, cu setări DSL configurate pentru prevenirea scurgerilor.',
+ },
+ export: {
+ title: 'Exportă variabile de mediu secrete?',
+ checkbox: 'Exportă valori secrete',
+ ignore: 'Exportă DSL',
+ export: 'Exportă DSL cu valori secrete',
+ },
+ },
changeHistory: {
title: 'Istoric modificări',
placeholder: 'Nu ați schimbat nimic încă',
diff --git a/web/i18n/uk-UA/app.ts b/web/i18n/uk-UA/app.ts
index 209bf12598c7a9..3add9dfe8146c5 100644
--- a/web/i18n/uk-UA/app.ts
+++ b/web/i18n/uk-UA/app.ts
@@ -85,6 +85,42 @@ const translation = {
workflow: 'Робочий процес',
completion: 'Завершення',
},
+ tracing: {
+ title: 'Відстеження продуктивності додатку',
+ description: 'Налаштування стороннього провайдера LLMOps та відстеження продуктивності додатку.',
+ config: 'Налаштувати',
+ collapse: 'Згорнути',
+ expand: 'Розгорнути',
+ tracing: 'Відстеження',
+ disabled: 'Вимкнено',
+ disabledTip: 'Спочатку налаштуйте провайдера',
+ enabled: 'В роботі',
+ tracingDescription: 'Захоплення повного контексту виконання додатку, включаючи виклики LLM, контекст, підказки, HTTP-запити та інше, на сторонню платформу відстеження.',
+ configProviderTitle: {
+ configured: 'Налаштовано',
+ notConfigured: 'Налаштуйте провайдера для увімкнення відстеження',
+ moreProvider: 'Більше провайдерів',
+ },
+ langsmith: {
+ title: 'LangSmith',
+ description: 'Універсальна платформа розробника для кожного етапу життєвого циклу додатку на основі LLM.',
+ },
+ langfuse: {
+ title: 'Langfuse',
+ description: 'Трасування, оцінки, управління підказками та метрики для налагодження та покращення вашого LLM-додатку.',
+ },
+ inUse: 'Використовується',
+ configProvider: {
+ title: 'Налаштувати ',
+ placeholder: 'Введіть ваш {{key}}',
+    project: 'Проєкт',
+ publicKey: 'Публічний ключ',
+ secretKey: 'Секретний ключ',
+ viewDocsLink: 'Переглянути документацію {{key}}',
+ removeConfirmTitle: 'Видалити налаштування {{key}}?',
+ removeConfirmContent: 'Поточне налаштування використовується, його видалення вимкне функцію Відстеження.',
+ },
+ },
}
export default translation
diff --git a/web/i18n/uk-UA/common.ts b/web/i18n/uk-UA/common.ts
index ededdcb2281115..fb0003d35f6d58 100644
--- a/web/i18n/uk-UA/common.ts
+++ b/web/i18n/uk-UA/common.ts
@@ -12,6 +12,7 @@ const translation = {
cancel: 'Скасувати',
clear: 'Очистити',
save: 'Зберегти',
+ saveAndEnable: 'Зберегти та Увімкнути',
edit: 'Редагувати',
add: 'Додати',
added: 'Додано',
@@ -408,7 +409,7 @@ const translation = {
latestAvailable: 'Dify {{version}} – це найновіша доступна версія.',
},
appMenus: {
- overview: 'Огляд',
+ overview: 'Моніторинг',
promptEng: 'Налаштування',
apiAccess: 'Доступ до API',
logAndAnn: 'Журнали та Повідомлення.',
diff --git a/web/i18n/uk-UA/workflow.ts b/web/i18n/uk-UA/workflow.ts
index 399b7bde7616d2..db7e76da69d655 100644
--- a/web/i18n/uk-UA/workflow.ts
+++ b/web/i18n/uk-UA/workflow.ts
@@ -19,7 +19,7 @@ const translation = {
goBackToEdit: 'Повернутися до редактора',
conversationLog: 'Журнал розмов',
features: 'Функції',
- debugAndPreview: 'Налагодження та попередній перегляд',
+ debugAndPreview: 'Попередній перегляд',
restart: 'Перезапустити',
currentDraft: 'Поточний чернетка',
currentDraftUnpublished: 'Поточний чернетка неопублікований',
@@ -70,6 +70,27 @@ const translation = {
workflowAsToolTip: 'Після оновлення робочого потоку необхідна переконфігурація інструменту.',
viewDetailInTracingPanel: 'Переглянути деталі',
},
+ env: {
+ envPanelTitle: 'Змінні середовища',
+ envDescription: 'Змінні середовища можуть використовуватися для зберігання приватної інформації та облікових даних. Вони доступні лише для читання і можуть бути відокремлені від файлу DSL під час експорту.',
+ envPanelButton: 'Додати змінну',
+ modal: {
+ title: 'Додати змінну середовища',
+ editTitle: 'Редагувати змінну середовища',
+ type: 'Тип',
+ name: 'Назва',
+ namePlaceholder: 'назва середовища',
+ value: 'Значення',
+ valuePlaceholder: 'значення середовища',
+ secretTip: 'Використовується для визначення конфіденційної інформації або даних, з налаштуваннями DSL, сконфігурованими для запобігання витоку.',
+ },
+ export: {
+ title: 'Експортувати секретні змінні середовища?',
+ checkbox: 'Експортувати секретні значення',
+ ignore: 'Експортувати DSL',
+ export: 'Експортувати DSL з секретними значеннями',
+ },
+ },
changeHistory: {
title: 'Історія змін',
placeholder: 'Ви ще нічого не змінили',
diff --git a/web/i18n/vi-VN/app-annotation.ts b/web/i18n/vi-VN/app-annotation.ts
index 2b70b8608c7688..6a9457f3d70508 100644
--- a/web/i18n/vi-VN/app-annotation.ts
+++ b/web/i18n/vi-VN/app-annotation.ts
@@ -1,6 +1,6 @@
const translation = {
title: 'Chú thích',
- name: 'Chú thích cho câu Trả lời',
+ name: 'Chú thích cho câu trả lời',
editBy: 'Câu trả lời được chỉnh sửa bởi {{author}}',
noData: {
title: 'Không có chú thích',
@@ -15,45 +15,45 @@ const translation = {
actions: 'hành động',
addAnnotation: 'Thêm chú thích',
bulkImport: 'Nhập hàng loạt',
- bulkExport: 'Xuất hoàng loạt',
+ bulkExport: 'Xuất hàng loạt',
clearAll: 'Xóa tất cả chú thích',
},
},
editModal: {
- title: 'Chỉnh sửa chú thích Trả lời',
- queryName: 'Truy vấn người dùng',
- answerName: 'Câu chuyện của BOT',
+ title: 'Chỉnh sửa chú thích câu trả lời',
+ queryName: 'Câu hỏi của người dùng',
+ answerName: 'Câu trả lời của AI',
yourAnswer: 'Câu trả lời của bạn',
answerPlaceholder: 'Nhập câu trả lời của bạn vào đây',
- yourQuery: 'Truy vấn của bạn',
- queryPlaceholder: 'Nhập truy vấn của bạn ở đây',
+ yourQuery: 'Câu hỏi của bạn',
+ queryPlaceholder: 'Nhập câu hỏi của bạn ở đây',
removeThisCache: 'Xóa chú thích này',
createdAt: 'Được tạo lúc',
},
addModal: {
- title: 'Thêm chú thích Trả lời',
+ title: 'Thêm chú thích câu trả lời',
queryName: 'Câu hỏi',
- answerName: 'Trả lời',
+ answerName: 'Câu trả lời',
answerPlaceholder: 'Nhập câu trả lời vào đây',
- queryPlaceholder: 'Nhập truy vấn ở đây',
- createNext: 'Thêm một phản hồi có chú thích khác',
+ queryPlaceholder: 'Nhập câu hỏi ở đây',
+ createNext: 'Thêm chú thích khác',
},
batchModal: {
title: 'Nhập hàng loạt',
csvUploadTitle: 'Kéo và thả tệp CSV của bạn vào đây hoặc ',
- browse: 'duyệt',
+ browse: 'chọn tệp',
tip: 'Tệp CSV phải tuân theo cấu trúc sau:',
question: 'câu hỏi',
- answer: 'trả lời',
- contentTitle: 'đoạn nội dung',
+ answer: 'câu trả lời',
+ contentTitle: 'tiêu đề nội dung',
content: 'nội dung',
template: 'Tải mẫu tại đây',
- cancel: 'Bỏ',
- run: 'Run Batch',
- runError: 'Run batch failed',
- processing: 'In batch processing',
- completed: 'Import completed',
- error: 'Import Error',
+ cancel: 'Hủy',
+ run: 'Chạy hàng loạt',
+ runError: 'Chạy hàng loạt thất bại',
+ processing: 'Đang xử lý hàng loạt',
+ completed: 'Nhập hoàn tất',
+ error: 'Lỗi khi nhập',
ok: 'OK',
},
errorMessage: {
@@ -61,27 +61,27 @@ const translation = {
queryRequired: 'Câu hỏi là bắt buộc',
},
viewModal: {
- annotatedResponse: 'Chú thích Trả lời',
- hitHistory: 'Lịch sử lượt truy cập',
- hit: 'Truy cập',
+ annotatedResponse: 'Câu trả lời đã chú thích',
+ hitHistory: 'Lịch sử truy cập',
+ hit: 'Lượt truy cập',
hits: 'Lượt truy cập',
noHitHistory: 'Không có lịch sử truy cập',
},
hitHistoryTable: {
- query: 'Truy vấn',
- match: 'Chính xác',
- response: 'Phản ứng',
+ query: 'Câu hỏi',
+ match: 'Độ chính xác',
+ response: 'Phản hồi',
source: 'Nguồn',
score: 'Điểm',
time: 'Thời gian',
},
initSetup: {
- title: 'Chú thích Trả lời Thiết lập ban đầu',
- configTitle: 'Thiết lập trả lời chú thích',
+ title: 'Thiết lập ban đầu cho chú thích câu trả lời',
+ configTitle: 'Thiết lập chú thích câu trả lời',
confirmBtn: 'Lưu & Kích hoạt',
configConfirmBtn: 'Lưu',
},
- embeddingModelSwitchTip: 'Mô hình vector hóa văn bản chú thích, mô hình chuyển đổi sẽ được nhúng lại, dẫn đến phát sinh thêm chi phí.',
+ embeddingModelSwitchTip: 'Mô hình vector hóa văn bản chú thích, việc chuyển đổi mô hình sẽ dẫn đến việc nhúng lại, có thể phát sinh thêm chi phí.',
}
export default translation
diff --git a/web/i18n/vi-VN/app-api.ts b/web/i18n/vi-VN/app-api.ts
index 67c5a8bfa70ca0..cb89b980088965 100644
--- a/web/i18n/vi-VN/app-api.ts
+++ b/web/i18n/vi-VN/app-api.ts
@@ -1,35 +1,35 @@
const translation = {
- apiServer: 'API Server',
- apiKey: 'API Key',
+ apiServer: 'Máy chủ API',
+ apiKey: 'Khóa API',
status: 'Trạng thái',
- disabled: 'Tắt',
+ disabled: 'Đã tắt',
ok: 'Đang hoạt động',
copy: 'Sao chép',
- copied: 'Đã chép',
+ copied: 'Đã sao chép',
play: 'Chạy',
- pause: 'Dừng',
+ pause: 'Tạm dừng',
playing: 'Đang chạy',
merMaind: {
- rerender: 'Redo Rerender',
+ rerender: 'Vẽ lại',
},
never: 'Không bao giờ',
apiKeyModal: {
- apiSecretKey: 'API khoá bí mật',
- apiSecretKeyTips: 'Để ngăn chặn việc lạm dụng API, hãy bảo vệ Khóa API của bạn. Tránh sử dụng nó dưới dạng văn bản thuần túy trong mã giao diện người dùng. :)',
+ apiSecretKey: 'Khóa bí mật API',
+ apiSecretKeyTips: 'Để ngăn chặn việc lạm dụng API, hãy bảo vệ Khóa API của bạn. Tránh sử dụng nó dưới dạng văn bản thuần trong mã giao diện người dùng.',
createNewSecretKey: 'Tạo khóa bí mật mới',
secretKey: 'Khóa bí mật',
- created: 'CREATED',
- lastUsed: 'LAST USED',
- generateTips: 'Giữ chìa khóa này ở nơi an toàn và dễ tiếp cận.',
+ created: 'ĐÃ TẠO',
+ lastUsed: 'SỬ DỤNG LẦN CUỐI',
+ generateTips: 'Hãy lưu giữ khóa này ở nơi an toàn và dễ tiếp cận.',
},
actionMsg: {
deleteConfirmTitle: 'Xóa khóa bí mật này?',
- deleteConfirmTips: 'Hành động này không thể được hoàn tác.',
+ deleteConfirmTips: 'Hành động này không thể hoàn tác.',
ok: 'OK',
},
completionMode: {
- title: 'Completion App API',
- info: 'Đối với việc tạo văn bản chất lượng cao, như bài viết, tóm tắt và dịch thuật, hãy sử dụng API hoàn thành tin nhắn với đầu vào người dùng. Việc tạo văn bản dựa trên các thông số mô hình và mẫu đề xuất được thiết lập trong Dify Prompt Engineering.',
+ title: 'API ứng dụng hoàn thành',
+ info: 'Đối với việc tạo văn bản chất lượng cao như bài viết, tóm tắt và dịch thuật, hãy sử dụng API hoàn thành tin nhắn với đầu vào từ người dùng. Việc tạo văn bản dựa trên các thông số mô hình và mẫu đề xuất được thiết lập trong Dify Prompt Engineering.',
createCompletionApi: 'Tạo tin nhắn hoàn thành',
createCompletionApiTip: 'Tạo một tin nhắn hoàn thành để hỗ trợ chế độ câu hỏi và trả lời.',
inputsTips: '(Tùy chọn) Cung cấp các trường đầu vào người dùng dưới dạng cặp khóa-giá trị, tương ứng với các biến trong Prompt Eng. Khóa là tên biến, Giá trị là giá trị tham số. Nếu loại trường là Lựa chọn, Giá trị đã gửi phải là một trong các lựa chọn đã thiết lập trước.',
@@ -44,7 +44,7 @@ const translation = {
parametersApiTip: 'Truy xuất các tham số Đầu vào được cấu hình, bao gồm tên biến, tên trường, loại và giá trị mặc định. Thường được sử dụng để hiển thị các trường này trong một biểu mẫu hoặc điền vào các giá trị mặc định sau khi máy khách tải.',
},
chatMode: {
- title: 'Chat App API',
+ title: 'API ứng dụng trò chuyện',
info: 'Đối với ứng dụng trò chuyện linh hoạt sử dụng định dạng Câu hỏi và Trả lời, gọi API tin nhắn trò chuyện để bắt đầu cuộc trò chuyện. Duy trì cuộc trò chuyện liên tục bằng cách chuyển conversation_id đã trả về. Các tham số phản hồi và mẫu phụ thuộc vào các cài đặt của Dify Prompt Eng.',
createChatApi: 'Tạo tin nhắn trò chuyện',
createChatApiTip: 'Tạo một tin nhắn trò chuyện mới hoặc tiếp tục một cuộc trò chuyện đang tồn tại.',
@@ -53,7 +53,7 @@ const translation = {
blocking: 'Loại chặn, đợi để thực hiện hoàn tất và trả kết quả. (Yêu cầu có thể bị gián đoạn nếu quá trình kéo dài)',
streaming: 'trả về dữ liệu theo luồng. Thực hiện trả dữ liệu theo luồng dựa trên SSE (Sự kiện được gửi từ máy chủ).',
conversationIdTip: '(Tùy chọn) ID cuộc trò chuyện: để trống cho cuộc trò chuyện lần đầu; chuyển conversation_id từ ngữ cảnh để tiếp tục cuộc trò chuyện.',
- messageFeedbackApi: 'Phản hồi của người dùng cuối về tin nhắn, như',
+ messageFeedbackApi: 'Phản hồi của người dùng cuối về tin nhắn',
messageFeedbackApiTip: 'Đánh giá các tin nhắn nhận được thay mặt cho người dùng cuối với các lựa chọn thích hoặc không thích. Dữ liệu này hiển thị trên trang Nhật ký & Chú thích và được sử dụng cho việc điều chỉnh mô hình trong tương lai.',
messageIDTip: 'ID tin nhắn',
ratingTip: 'thích hoặc không thích, null là hủy bỏ',
@@ -61,11 +61,11 @@ const translation = {
chatMsgHistoryApiTip: 'Trang đầu tiên trả về `limit` tin nhắn mới nhất, được sắp xếp theo thứ tự ngược lại.',
chatMsgHistoryConversationIdTip: 'ID Cuộc trò chuyện',
chatMsgHistoryFirstId: 'ID của bản ghi trò chuyện đầu tiên trên trang hiện tại. Giá trị mặc định là không có.',
- chatMsgHistoryLimit: 'Bao nhiêu cuộc trò chuyện được trả lại trong một yêu cầu',
+ chatMsgHistoryLimit: 'Số lượng cuộc trò chuyện được trả về trong một yêu cầu',
conversationsListApi: 'Lấy danh sách cuộc trò chuyện',
conversationsListApiTip: 'Lấy danh sách phiên của người dùng hiện tại. Theo mặc định, trả về 20 phiên cuối cùng.',
conversationsListFirstIdTip: 'ID của bản ghi cuối cùng trên trang hiện tại, mặc định không có.',
- conversationsListLimitTip: 'Bao nhiêu cuộc trò chuyện được trả lại trong một yêu cầu',
+ conversationsListLimitTip: 'Số lượng cuộc trò chuyện được trả về trong một yêu cầu',
conversationRenamingApi: 'Đổi tên cuộc trò chuyện',
conversationRenamingApiTip: 'Đổi tên cuộc trò chuyện; tên sẽ được hiển thị trong giao diện nhiều phiên.',
conversationRenamingNameTip: 'Tên mới',
@@ -74,7 +74,7 @@ const translation = {
},
develop: {
requestBody: 'Nội dung yêu cầu',
- pathParams: 'Thông số đường dẫn',
+ pathParams: 'Tham số đường dẫn',
query: 'Truy vấn',
},
}
diff --git a/web/i18n/vi-VN/app-debug.ts b/web/i18n/vi-VN/app-debug.ts
index 327c1f0ad25083..4797f768e3787e 100644
--- a/web/i18n/vi-VN/app-debug.ts
+++ b/web/i18n/vi-VN/app-debug.ts
@@ -1,102 +1,102 @@
const translation = {
pageTitle: {
- line1: 'PROMPT',
- line2: 'Engineering',
+ line1: 'YÊU CẦU',
+ line2: 'KỸ THUẬT',
},
- orchestrate: 'Dàn nhạc',
+ orchestrate: 'Điều phối',
promptMode: {
- simple: 'Chuyển sang Chế độ Chuyên gia để chỉnh sửa toàn bộ PROMPT',
+ simple: 'Chuyển sang Chế độ Chuyên gia để chỉnh sửa toàn bộ YÊU CẦU',
advanced: 'Chế độ Chuyên gia',
- switchBack: 'Chuyển về',
+ switchBack: 'Quay lại',
advancedWarning: {
- title: 'Bạn đã chuyển sang Chế độ Chuyên gia, và một khi bạn sửa đổi PROMPT, bạn KHÔNG THỂ quay lại chế độ cơ bản.',
- description: 'Trong Chế độ Chuyên gia, bạn có thể chỉnh sửa toàn bộ PROMPT.',
+ title: 'Bạn đã chuyển sang Chế độ Chuyên gia. Sau khi sửa đổi YÊU CẦU, bạn KHÔNG THỂ quay lại chế độ cơ bản.',
+ description: 'Trong Chế độ Chuyên gia, bạn có thể chỉnh sửa toàn bộ YÊU CẦU.',
learnMore: 'Tìm hiểu thêm',
- ok: 'OK',
+ ok: 'Đồng ý',
},
operation: {
- addMessage: 'Thêm Tin nhắn',
+ addMessage: 'Thêm tin nhắn',
},
- contextMissing: 'Thiếu thành phần Ngữ cảnh, hiệu quả của prompt có thể không tốt.',
+ contextMissing: 'Thiếu thành phần Ngữ cảnh, hiệu quả của yêu cầu có thể không tốt.',
},
operation: {
- applyConfig: 'Xuất bản',
+ applyConfig: 'Áp dụng',
resetConfig: 'Đặt lại',
debugConfig: 'Gỡ lỗi',
- addFeature: 'Thêm Tính năng',
+ addFeature: 'Thêm tính năng',
automatic: 'Tự động',
stopResponding: 'Dừng phản hồi',
agree: 'thích',
disagree: 'không thích',
- cancelAgree: 'Hủy thích',
- cancelDisagree: 'Hủy không thích',
- userAction: 'Người dùng ',
+ cancelAgree: 'Bỏ thích',
+ cancelDisagree: 'Bỏ không thích',
+ userAction: 'Hành động người dùng ',
},
notSetAPIKey: {
- title: 'Khóa nhà cung cấp LLM chưa được đặt',
- trailFinished: 'Kết thúc dấu vết',
- description: 'Khóa nhà cung cấp LLM chưa được đặt, và cần được đặt trước khi gỡ lỗi.',
+ title: 'Chưa thiết lập khóa API của nhà cung cấp LLM',
+ trailFinished: 'Kết thúc dùng thử',
+ description: 'Chưa thiết lập khóa API của nhà cung cấp LLM. Cần thiết lập trước khi gỡ lỗi.',
settingBtn: 'Đi đến cài đặt',
},
trailUseGPT4Info: {
- title: 'Hiện không hỗ trợ gpt-4',
- description: 'Sử dụng gpt-4, vui lòng đặt API Key.',
+ title: 'Hiện không hỗ trợ GPT-4',
+ description: 'Để sử dụng GPT-4, vui lòng thiết lập API Key.',
},
feature: {
groupChat: {
title: 'Nâng cao trò chuyện',
- description: 'Thêm cài đặt trước cuộc trò chuyện cho ứng dụng có thể cải thiện trải nghiệm người dùng.',
+ description: 'Thêm cài đặt trước cho cuộc trò chuyện có thể cải thiện trải nghiệm người dùng.',
},
groupExperience: {
title: 'Nâng cao trải nghiệm',
},
conversationOpener: {
- title: 'Khởi động cuộc trò chuyện',
- description: 'Trong một ứng dụng trò chuyện, câu nói đầu tiên mà AI tự động nói với người dùng thường được sử dụng như một lời chào.',
+ title: 'Mở đầu cuộc trò chuyện',
+ description: 'Trong ứng dụng trò chuyện, câu nói đầu tiên mà AI tự động nói với người dùng thường được sử dụng như một lời chào.',
},
suggestedQuestionsAfterAnswer: {
- title: 'Theo dõi',
- description: 'Thiết lập đề xuất câu hỏi tiếp theo có thể mang lại trò chuyện tốt hơn cho người dùng.',
+ title: 'Câu hỏi gợi ý',
+ description: 'Thiết lập đề xuất câu hỏi tiếp theo có thể tạo ra cuộc trò chuyện tốt hơn cho người dùng.',
resDes: '3 đề xuất cho câu hỏi tiếp theo của người dùng.',
tryToAsk: 'Thử hỏi',
},
moreLikeThis: {
- title: 'Nhiều hơn như vậy',
- description: 'Tạo nhiều văn bản cùng một lúc, và sau đó chỉnh sửa và tiếp tục tạo ra.',
- generateNumTip: 'Số lượng mỗi lần tạo ra',
- tip: 'Sử dụng tính năng này sẽ tốn thêm token.',
+ title: 'Thêm tương tự',
+ description: 'Tạo nhiều văn bản cùng một lúc, sau đó chỉnh sửa và tiếp tục tạo ra.',
+ generateNumTip: 'Số lượng mỗi lần tạo',
+ tip: 'Sử dụng tính năng này sẽ tiêu tốn thêm token.',
},
speechToText: {
- title: 'Chuyển đổi Giọng nói thành Văn bản',
- description: 'Một khi được bật, bạn có thể sử dụng đầu vào giọng nói.',
- resDes: 'Đầu vào Giọng nói đã được bật',
+ title: 'Chuyển đổi giọng nói thành văn bản',
+ description: 'Khi được bật, bạn có thể sử dụng đầu vào bằng giọng nói.',
+ resDes: 'Đã bật đầu vào bằng giọng nói',
},
textToSpeech: {
- title: 'Chuyển đổi Văn bản thành Giọng nói',
- description: 'Một khi được bật, văn bản có thể được chuyển đổi thành giọng nói.',
- resDes: 'Chuyển đổi Văn bản thành Âm thanh đã được bật',
+ title: 'Chuyển đổi văn bản thành giọng nói',
+ description: 'Khi được bật, văn bản có thể được chuyển đổi thành giọng nói.',
+ resDes: 'Đã bật chuyển đổi văn bản thành âm thanh',
},
citation: {
- title: 'Trích dẫn và Ghi chú',
- description: 'Một khi được bật, hiển thị tài liệu nguồn và phần được ghi nhận của nội dung được tạo ra.',
- resDes: 'Trích dẫn và Ghi chú đã được bật',
+ title: 'Trích dẫn và chú thích',
+ description: 'Khi được bật, hiển thị nguồn tài liệu và phần được trích dẫn của nội dung được tạo ra.',
+ resDes: 'Đã bật trích dẫn và chú thích',
},
annotation: {
- title: 'Phản hồi Chú thích',
- description: 'Bạn có thể thêm phản hồi chất lượng cao vào bộ nhớ cache để ưu tiên phù hợp với các câu hỏi của người dùng tương tự.',
- resDes: 'Phản hồi Chú thích đã được bật',
+ title: 'Phản hồi có chú thích',
+ description: 'Bạn có thể thêm phản hồi chất lượng cao vào bộ nhớ cache để ưu tiên phù hợp với các câu hỏi tương tự của người dùng.',
+ resDes: 'Đã bật phản hồi có chú thích',
scoreThreshold: {
- title: 'Ngưỡng Điểm',
- description: 'Được sử dụng để đặt ngưỡng tương đồng cho phản hồi chú thích.',
- easyMatch: 'Tương đồng Dễ dàng',
- accurateMatch: 'Tương đồng Chính xác',
+ title: 'Ngưỡng điểm',
+ description: 'Được sử dụng để đặt ngưỡng tương đồng cho phản hồi có chú thích.',
+ easyMatch: 'Khớp dễ dàng',
+ accurateMatch: 'Khớp chính xác',
},
matchVariable: {
- title: 'Biến Phù hợp',
- choosePlaceholder: 'Chọn biến phù hợp',
+ title: 'Biến khớp',
+ choosePlaceholder: 'Chọn biến khớp',
},
- cacheManagement: 'Chú thích',
- cached: 'Đã ghi chú',
+ cacheManagement: 'Quản lý chú thích',
+ cached: 'Đã lưu cache',
remove: 'Xóa',
removeConfirm: 'Xóa chú thích này?',
add: 'Thêm chú thích',
@@ -104,36 +104,36 @@ const translation = {
},
dataSet: {
title: 'Ngữ cảnh',
- noData: 'Bạn có thể nhập Dữ liệu như là ngữ cảnh',
+ noData: 'Bạn có thể nhập dữ liệu làm ngữ cảnh',
words: 'Từ',
- textBlocks: 'Khối Văn bản',
- selectTitle: 'Chọn Kiến thức tham khảo',
+ textBlocks: 'Khối văn bản',
+ selectTitle: 'Chọn kiến thức tham khảo',
selected: 'Kiến thức đã chọn',
- noDataSet: 'Không tìm thấy Kiến thức',
- toCreate: 'Đi tới tạo mới',
- notSupportSelectMulti: 'Hiện chỉ hỗ trợ một Kiến thức',
+ noDataSet: 'Không tìm thấy kiến thức',
+ toCreate: 'Tạo mới',
+ notSupportSelectMulti: 'Hiện chỉ hỗ trợ một kiến thức',
queryVariable: {
- title: 'Biến Truy vấn',
- tip: 'Biến này sẽ được sử dụng làm đầu vào truy vấn cho việc truy xuất ngữ cảnh, lấy thông tin ngữ cảnh liên quan đến đầu vào của biến này.',
+ title: 'Biến truy vấn',
+ tip: 'Biến này sẽ được sử dụng làm đầu vào truy vấn để truy xuất ngữ cảnh, lấy thông tin ngữ cảnh liên quan đến đầu vào của biến này.',
choosePlaceholder: 'Chọn biến truy vấn',
noVar: 'Không có biến',
- noVarTip: 'xin vui lòng tạo một biến dưới phần Biến',
- unableToQueryDataSet: 'Không thể truy vấn Kiến thức',
- unableToQueryDataSetTip: 'Không thể truy vấn Kiến thức thành công, vui lòng chọn một biến truy vấn ngữ cảnh trong phần ngữ cảnh.',
- ok: 'OK',
- contextVarNotEmpty: 'biến truy vấn ngữ cảnh không thể trống',
- deleteContextVarTitle: 'Xóa biến “{{varName}}”?',
- deleteContextVarTip: 'Biến này đã được thiết lập là biến truy vấn ngữ cảnh, và việc loại bỏ nó sẽ ảnh hưởng đến việc sử dụng bình thường của Kiến thức. Nếu bạn vẫn cần xóa nó, vui lòng chọn lại nó trong phần ngữ cảnh.',
+ noVarTip: 'Vui lòng tạo một biến trong phần Biến',
+ unableToQueryDataSet: 'Không thể truy vấn kiến thức',
+ unableToQueryDataSetTip: 'Không thể truy vấn kiến thức thành công, vui lòng chọn một biến truy vấn ngữ cảnh trong phần ngữ cảnh.',
+ ok: 'Đồng ý',
+ contextVarNotEmpty: 'Biến truy vấn ngữ cảnh không thể trống',
+ deleteContextVarTitle: 'Xóa biến "{{varName}}"?',
+ deleteContextVarTip: 'Biến này đã được thiết lập là biến truy vấn ngữ cảnh, và việc xóa nó sẽ ảnh hưởng đến việc sử dụng bình thường của kiến thức. Nếu bạn vẫn cần xóa nó, vui lòng chọn lại biến khác trong phần ngữ cảnh.',
},
},
tools: {
title: 'Công cụ',
- tips: 'Công cụ cung cấp một phương thức gọi API chuẩn, lấy đầu vào người dùng hoặc biến làm tham số yêu cầu để truy vấn dữ liệu bên ngoài như ngữ cảnh.',
- toolsInUse: '{{count}} công cụ đang được sử dụng',
+ tips: 'Công cụ cung cấp phương thức gọi API tiêu chuẩn, sử dụng đầu vào của người dùng hoặc biến làm tham số yêu cầu để truy vấn dữ liệu bên ngoài như ngữ cảnh.',
+ toolsInUse: 'Đang sử dụng {{count}} công cụ',
modal: {
title: 'Công cụ',
toolType: {
- title: 'Loại Công cụ',
+ title: 'Loại công cụ',
placeholder: 'Vui lòng chọn loại công cụ',
},
name: {
@@ -141,20 +141,20 @@ const translation = {
placeholder: 'Vui lòng nhập tên',
},
variableName: {
- title: 'Tên Biến',
+ title: 'Tên biến',
placeholder: 'Vui lòng nhập tên biến',
},
},
},
conversationHistory: {
- title: 'Lịch sử Cuộc trò chuyện',
- description: 'Đặt tên tiền tố cho các vai trò trong cuộc trò chuyện',
- tip: 'Lịch sử Cuộc trò chuyện chưa được bật, vui lòng thêm vào phần prompt ở trên.',
+ title: 'Lịch sử cuộc trò chuyện',
+ description: 'Đặt tiền tố cho các vai trò trong cuộc trò chuyện',
+ tip: 'Lịch sử cuộc trò chuyện chưa được bật, vui lòng thêm vào phần prompt ở trên.',
learnMore: 'Tìm hiểu thêm',
editModal: {
- title: 'Chỉnh sửa Tên Vai trò Cuộc trò chuyện',
- userPrefix: 'Tiền tố Người dùng',
- assistantPrefix: 'Tiền tố Trợ lý',
+ title: 'Chỉnh sửa tên vai trò trong cuộc trò chuyện',
+ userPrefix: 'Tiền tố người dùng',
+ assistantPrefix: 'Tiền tố trợ lý',
},
},
toolbox: {
@@ -162,29 +162,29 @@ const translation = {
},
moderation: {
title: 'Kiểm duyệt nội dung',
- description: 'Bảo vệ đầu ra của mô hình bằng cách sử dụng API kiểm duyệt hoặc duy trì một danh sách từ nhạy cảm.',
- allEnabled: 'Nội dung ĐẦU VÀO/ĐẦU RA Đã Bật',
- inputEnabled: 'Nội dung ĐẦU VÀO Đã Bật',
- outputEnabled: 'Nội dung ĐẦU RA Đã Bật',
+ description: 'Bảo vệ đầu ra của mô hình bằng cách sử dụng API kiểm duyệt hoặc danh sách từ nhạy cảm.',
+ allEnabled: 'Đã bật kiểm duyệt nội dung ĐẦU VÀO/ĐẦU RA',
+ inputEnabled: 'Đã bật kiểm duyệt nội dung ĐẦU VÀO',
+ outputEnabled: 'Đã bật kiểm duyệt nội dung ĐẦU RA',
modal: {
title: 'Cài đặt kiểm duyệt nội dung',
provider: {
title: 'Nhà cung cấp',
openai: 'Kiểm duyệt OpenAI',
openaiTip: {
- prefix: 'Kiểm duyệt OpenAI yêu cầu một khóa API OpenAI được cấu hình trong ',
+ prefix: 'Kiểm duyệt OpenAI yêu cầu cấu hình khóa API OpenAI trong ',
suffix: '.',
},
keywords: 'Từ khóa',
},
keywords: {
- tip: 'Mỗi dòng một từ khóa, phân tách bằng các dòng. Tối đa 100 ký tự mỗi dòng.',
- placeholder: 'Mỗi dòng một từ khóa, phân tách bằng các dòng',
+ tip: 'Mỗi dòng một từ khóa, phân tách bằng dòng mới. Tối đa 100 ký tự mỗi dòng.',
+ placeholder: 'Mỗi dòng một từ khóa, phân tách bằng dòng mới',
line: 'Dòng',
},
content: {
- input: 'Kiểm duyệt Nội dung ĐẦU VÀO',
- output: 'Kiểm duyệt Nội dung ĐẦU RA',
+ input: 'Kiểm duyệt nội dung ĐẦU VÀO',
+      output: 'Kiểm duyệt nội dung ĐẦU RA',
preset: 'Câu trả lời mẫu',
placeholder: 'Nội dung câu trả lời mẫu ở đây',
condition: 'Đã bật ít nhất một kiểm duyệt nội dung ĐẦU VÀO và ĐẦU RA',
@@ -193,7 +193,7 @@ const translation = {
supportMarkdown: 'Hỗ trợ Markdown',
},
openaiNotConfig: {
- before: 'Kiểm duyệt OpenAI yêu cầu một khóa API OpenAI được cấu hình trong',
+ before: 'Kiểm duyệt OpenAI yêu cầu cấu hình khóa API OpenAI trong',
after: '',
},
},
@@ -202,63 +202,57 @@ const translation = {
automatic: {
title: 'Tự động hóa triển khai ứng dụng',
description: 'Mô tả tình huống của bạn, Dify sẽ tự động hóa một ứng dụng cho bạn.',
- intendedAudience: 'Ai là đối tượng mục tiêu?',
- intendedAudiencePlaceHolder: 'ví dụ: Sinh viên',
- solveProblem: 'Họ hy vọng AI có thể giải quyết vấn đề gì cho họ?',
- solveProblemPlaceHolder: 'ví dụ: Đánh giá thành tích học tập',
- generate: 'Tạo ra',
- audiencesRequired: 'Yêu cầu Đối tượng mục tiêu',
- problemRequired: 'Vấn đề cần thiết',
+ intendedAudience: 'Đối tượng mục tiêu là ai?',
+ intendedAudiencePlaceHolder: 'Ví dụ: Sinh viên',
+ solveProblem: 'Họ hy vọng AI có thể giải quyết vấn đề gì?',
+ solveProblemPlaceHolder: 'Ví dụ: Đánh giá thành tích học tập',
+ generate: 'Tạo',
+ audiencesRequired: 'Yêu cầu nhập đối tượng mục tiêu',
+ problemRequired: 'Yêu cầu nhập vấn đề cần giải quyết',
resTitle: 'Chúng tôi đã tự động hóa ứng dụng sau đây cho bạn.',
apply: 'Áp dụng tự động hóa này',
noData: 'Mô tả tình huống sử dụng của bạn ở bên trái, xem trước tự động hóa sẽ hiển thị ở đây.',
- loading: 'Tự động hóa ứng dụng cho bạn...',
+ loading: 'Đang tự động hóa ứng dụng cho bạn...',
overwriteTitle: 'Ghi đè cấu hình hiện tại?',
overwriteMessage: 'Áp dụng tự động hóa này sẽ ghi đè lên cấu hình hiện tại.',
},
resetConfig: {
title: 'Xác nhận đặt lại?',
- message:
- 'Đặt lại sẽ loại bỏ các thay đổi, khôi phục cấu hình đã xuất bản lần cuối.',
+ message: 'Đặt lại sẽ loại bỏ các thay đổi, khôi phục cấu hình đã xuất bản lần cuối.',
},
errorMessage: {
- nameOfKeyRequired: 'tên của khóa: {{key}} được yêu cầu',
- valueOfVarRequired: 'giá trị {{key}} không thể trống',
- queryRequired: 'Văn bản yêu cầu được yêu cầu.',
- waitForResponse:
- 'Vui lòng đợi phản hồi của tin nhắn trước để hoàn thành.',
- waitForBatchResponse:
- 'Vui lòng đợi phản hồi của tác vụ hàng loạt để hoàn thành.',
+ nameOfKeyRequired: 'Tên của khóa: {{key}} là bắt buộc',
+ valueOfVarRequired: 'Giá trị {{key}} không thể trống',
+ queryRequired: 'Văn bản yêu cầu là bắt buộc.',
+ waitForResponse: 'Vui lòng đợi phản hồi của tin nhắn trước để hoàn thành.',
+ waitForBatchResponse: 'Vui lòng đợi phản hồi của tác vụ hàng loạt để hoàn thành.',
notSelectModel: 'Vui lòng chọn một mô hình',
waitForImgUpload: 'Vui lòng đợi hình ảnh được tải lên',
},
chatSubTitle: 'Hướng dẫn',
- completionSubTitle: 'Tiền Tố Lời Nhắc',
- promptTip:
- 'Lời nhắc hướng dẫn các phản hồi của AI với hướng dẫn và ràng buộc. Chèn biến như {{input}}. Lời nhắc này sẽ không được hiển thị cho người dùng.',
+ completionSubTitle: 'Tiền tố lời nhắc',
+ promptTip: 'Lời nhắc hướng dẫn các phản hồi của AI với hướng dẫn và ràng buộc. Chèn biến như {{input}}. Lời nhắc này sẽ không được hiển thị cho người dùng.',
formattingChangedTitle: 'Định dạng đã thay đổi',
- formattingChangedText:
- 'Thay đổi định dạng sẽ đặt lại khu vực gỡ lỗi, bạn có chắc chắn không?',
+ formattingChangedText: 'Thay đổi định dạng sẽ đặt lại khu vực gỡ lỗi, bạn có chắc chắn không?',
variableTitle: 'Biến',
- variableTip:
- 'Người dùng điền các biến vào một biểu mẫu, tự động thay thế các biến trong lời nhắc.',
- notSetVar: 'Biến cho phép người dùng giới thiệu các từ khóa lời nhắc hoặc mở đầu khi điền vào biểu mẫu. Bạn có thể thử nhập "{{input}}" trong các từ khóa lời nhắc.',
- autoAddVar: 'Biến không xác định được tham chiếu trong tiền-lời nhắc, bạn có muốn thêm chúng vào biểu mẫu đầu vào người dùng không?',
+ variableTip: 'Người dùng điền các biến vào một biểu mẫu, tự động thay thế các biến trong lời nhắc.',
+ notSetVar: 'Biến cho phép người dùng đưa ra từ khóa lời nhắc hoặc mở đầu khi điền vào biểu mẫu. Bạn có thể thử nhập "{{input}}" trong các từ khóa lời nhắc.',
+ autoAddVar: 'Phát hiện biến không xác định được tham chiếu trong tiền-lời nhắc, bạn có muốn thêm chúng vào biểu mẫu đầu vào người dùng không?',
variableTable: {
- key: 'Khóa Biến',
- name: 'Tên Trường Nhập Liệu Người Dùng',
+ key: 'Khóa biến',
+ name: 'Tên trường nhập liệu người dùng',
optional: 'Tùy chọn',
- type: 'Loại Nhập Liệu',
+ type: 'Loại nhập liệu',
action: 'Hành động',
typeString: 'Chuỗi',
- typeSelect: 'Chọn',
+ typeSelect: 'Lựa chọn',
},
varKeyError: {
canNoBeEmpty: 'Khóa biến không thể trống',
tooLong: 'Khóa biến: {{key}} quá dài. Không thể dài hơn 30 ký tự',
notValid: 'Khóa biến: {{key}} không hợp lệ. Chỉ có thể chứa chữ cái, số, và dấu gạch dưới',
notStartWithNumber: 'Khóa biến: {{key}} không thể bắt đầu bằng số',
- keyAlreadyExists: 'Khóa biến: :{{key}} đã tồn tại',
+ keyAlreadyExists: 'Khóa biến: {{key}} đã tồn tại',
},
otherError: {
promptNoBeEmpty: 'Lời nhắc không thể trống',
@@ -266,159 +260,156 @@ const translation = {
queryNoBeEmpty: 'Truy vấn phải được thiết lập trong lời nhắc',
},
variableConig: {
- 'addModalTitle': 'Thêm Trường Nhập',
- 'editModalTitle': 'Chỉnh Sửa Trường Nhập',
+ 'addModalTitle': 'Thêm trường nhập',
+ 'editModalTitle': 'Chỉnh sửa trường nhập',
'description': 'Cài đặt cho biến {{varName}}',
- 'fieldType': 'Loại Trường',
+ 'fieldType': 'Loại trường',
'string': 'Văn bản ngắn',
'text-input': 'Văn bản ngắn',
'paragraph': 'Đoạn văn',
- 'select': 'Chọn',
+ 'select': 'Lựa chọn',
'number': 'Số',
- 'notSet': 'Không thiết lập, hãy thử nhập {{input}} trong gợi ý tiền tố',
+ 'notSet': 'Chưa thiết lập, hãy thử nhập {{input}} trong gợi ý tiền tố',
'stringTitle': 'Tùy chọn hộp văn bản biểu mẫu',
'maxLength': 'Độ dài tối đa',
'options': 'Tùy chọn',
'addOption': 'Thêm tùy chọn',
- 'apiBasedVar': 'Biến Dựa trên API',
- 'varName': 'Tên Biến',
- 'labelName': 'Tên Nhãn',
+ 'apiBasedVar': 'Biến dựa trên API',
+ 'varName': 'Tên biến',
+ 'labelName': 'Tên nhãn',
'inputPlaceholder': 'Vui lòng nhập',
'required': 'Bắt buộc',
'errorMsg': {
varNameRequired: 'Tên biến là bắt buộc',
labelNameRequired: 'Tên nhãn là bắt buộc',
- varNameCanBeRepeat: 'Tên biến không được lặp lại',
- atLeastOneOption: 'Ít nhất một tùy chọn là bắt buộc',
+ varNameCanBeRepeat: 'Tên biến không được trùng lặp',
+ atLeastOneOption: 'Cần ít nhất một tùy chọn',
optionRepeat: 'Có các tùy chọn trùng lặp',
},
},
vision: {
- name: 'Tầm nhìn',
- description: 'Cho phép tầm nhìn sẽ cho phép mô hình nhận hình ảnh và trả lời các câu hỏi về chúng.',
+ name: 'Thị giác',
+ description: 'Cho phép tính năng thị giác sẽ cho phép mô hình nhận diện hình ảnh và trả lời các câu hỏi về chúng.',
settings: 'Cài đặt',
visionSettings: {
- title: 'Cài đặt Tầm nhìn',
+ title: 'Cài đặt thị giác',
resolution: 'Độ phân giải',
resolutionTooltip: `Độ phân giải thấp sẽ cho phép mô hình nhận một phiên bản hình ảnh 512 x 512 thấp hơn, và đại diện cho hình ảnh với ngân sách 65 token. Điều này cho phép API trả về phản hồi nhanh hơn và tiêu thụ ít token đầu vào cho các trường hợp sử dụng không yêu cầu chi tiết cao.
\n
- độ phân giải cao sẽ đầu tiên cho phép mô hình nhìn thấy hình ảnh thấp hơn và sau đó tạo ra các cắt chi tiết của hình ảnh đầu vào dưới dạng hình vuông 512px dựa trên kích thước hình ảnh đầu vào. Mỗi cắt chi tiết sử dụng hai lần ngân sách token cho tổng cộng 129 token.`,
+ Độ phân giải cao sẽ đầu tiên cho phép mô hình nhìn thấy hình ảnh thấp hơn và sau đó tạo ra các cắt chi tiết của hình ảnh đầu vào dưới dạng hình vuông 512px dựa trên kích thước hình ảnh đầu vào. Mỗi cắt chi tiết sử dụng hai lần ngân sách token cho tổng cộng 129 token.`,
high: 'Cao',
low: 'Thấp',
- uploadMethod: 'Phương thức Tải lên',
+ uploadMethod: 'Phương thức tải lên',
both: 'Cả hai',
- localUpload: 'Tải lên Nội bộ',
+ localUpload: 'Tải lên nội bộ',
url: 'URL',
- uploadLimit: 'Giới hạn Tải lên',
+ uploadLimit: 'Giới hạn tải lên',
},
},
voice: {
name: 'Giọng nói',
defaultDisplay: 'Giọng mặc định',
- description: 'Cài đặt giọng nói văn bản thành tiếng',
+ description: 'Cài đặt chuyển đổi văn bản thành giọng nói',
settings: 'Cài đặt',
voiceSettings: {
- title: 'Cài đặt Giọng nói',
+ title: 'Cài đặt giọng nói',
language: 'Ngôn ngữ',
- resolutionTooltip: 'Giọng nói văn bản hỗ trợ ngôn ngữ。',
- voice: 'Giọng',
- autoPlay: 'chạy tự động',
- autoPlayEnabled: 'bật',
- autoPlayDisabled: 'Khép kín',
+ resolutionTooltip: 'Chuyển đổi văn bản thành giọng nói hỗ trợ ngôn ngữ.',
+ voice: 'Giọng nói',
+ autoPlay: 'Tự động phát',
+ autoPlayEnabled: 'Đã bật',
+ autoPlayDisabled: 'Đã tắt',
},
},
openingStatement: {
- title: 'Mở đầu Trò chuyện',
+ title: 'Mở đầu cuộc trò chuyện',
add: 'Thêm',
writeOpener: 'Viết câu mở đầu',
placeholder: 'Viết thông điệp mở đầu của bạn ở đây, bạn có thể sử dụng biến, hãy thử nhập {{biến}}.',
- openingQuestion: 'Câu Hỏi Mở đầu',
- noDataPlaceHolder:
- 'Bắt đầu cuộc trò chuyện với người dùng có thể giúp AI thiết lập một mối quan hệ gần gũi hơn với họ trong các ứng dụng trò chuyện.',
+ openingQuestion: 'Câu hỏi mở đầu',
+ noDataPlaceHolder: 'Bắt đầu cuộc trò chuyện với người dùng có thể giúp AI thiết lập mối quan hệ gần gũi hơn với họ trong các ứng dụng trò chuyện.',
varTip: 'Bạn có thể sử dụng biến, hãy thử nhập {{biến}}',
- tooShort: 'Ít nhất 20 từ của lời nhắc ban đầu được yêu cầu để tạo ra các lời nhận đầu tiên cho cuộc trò chuyện.',
- notIncludeKey: 'Lời nhắc ban đầu không bao gồm biến: {{khóa}}. Vui lòng thêm nó vào lời nhắc ban đầu.',
+ tooShort: 'Cần ít nhất 20 từ trong lời nhắc ban đầu để tạo ra các câu mở đầu cho cuộc trò chuyện.',
+ notIncludeKey: 'Lời nhắc ban đầu không bao gồm biến: {{key}}. Vui lòng thêm nó vào lời nhắc ban đầu.',
},
modelConfig: {
model: 'Mô hình',
- setTone: 'Thiết lập tông của phản hồi',
- title: 'Mô hình và Tham số',
+ setTone: 'Thiết lập giọng điệu của phản hồi',
+ title: 'Mô hình và tham số',
modeType: {
chat: 'Trò chuyện',
completion: 'Hoàn thành',
},
},
inputs: {
- title: 'Gỡ Lỗi và Xem Trước',
+ title: 'Gỡ lỗi và xem trước',
noPrompt: 'Hãy thử viết một số lời nhắc trong trường tiền-lời nhắc',
- userInputField: 'Trường Nhập Liệu Người Dùng',
+ userInputField: 'Trường nhập liệu người dùng',
noVar: 'Điền vào giá trị của biến, nó sẽ tự động thay thế từ khóa lời nhắc mỗi khi bắt đầu phiên mới.',
- chatVarTip:
- 'Điền vào giá trị của biến, nó sẽ tự động thay thế từ khóa lời nhắc mỗi khi bắt đầu phiên mới',
- completionVarTip:
- 'Điền vào giá trị của biến, nó sẽ tự động thay thế từ khóa lời nhắc mỗi khi một câu hỏi được gửi.',
- previewTitle: 'Xem Trước Lời Nhắc',
- queryTitle: 'Nội dung Truy vấn',
+ chatVarTip: 'Điền vào giá trị của biến, nó sẽ tự động thay thế từ khóa lời nhắc mỗi khi bắt đầu phiên mới',
+ completionVarTip: 'Điền vào giá trị của biến, nó sẽ tự động thay thế từ khóa lời nhắc mỗi khi một câu hỏi được gửi.',
+ previewTitle: 'Xem trước lời nhắc',
+ queryTitle: 'Nội dung truy vấn',
queryPlaceholder: 'Vui lòng nhập văn bản yêu cầu.',
run: 'CHẠY',
},
- result: 'Văn bản Đầu Ra',
+ result: 'Văn bản đầu ra',
datasetConfig: {
- settingTitle: 'Cài đặt Truy xuất',
- knowledgeTip: 'Nhấn vào nút “+” để thêm kiến thức',
+ settingTitle: 'Cài đặt truy xuất',
+ knowledgeTip: 'Nhấn vào nút "+" để thêm kiến thức',
retrieveOneWay: {
- title: 'N-to-1 Truy xuất',
- description: 'Dựa trên ý định của người dùng và mô tả Kiến thức, Agent tự động chọn Kiến thức tốt nhất để truy vấn. Tốt nhất cho các ứng dụng có Kiến thức cụ thể, giới hạn.',
+ title: 'Truy xuất N-to-1',
+ description: 'Dựa trên ý định của người dùng và mô tả kiến thức, Agent tự động chọn kiến thức tốt nhất để truy vấn. Phù hợp nhất cho các ứng dụng có kiến thức cụ thể, giới hạn.',
},
retrieveMultiWay: {
title: 'Truy xuất đa hướng',
- description: 'Dựa trên ý định của người dùng, truy vấn qua tất cả Kiến thức, truy xuất văn bản liên quan từ nhiều nguồn và chọn ra kết quả tốt nhất phù hợp với truy vấn của người dùng sau khi sắp xếp lại. Yêu cầu cấu hình của API Rerank model.',
+ description: 'Dựa trên ý định của người dùng, truy vấn qua tất cả kiến thức, truy xuất văn bản liên quan từ nhiều nguồn và chọn ra kết quả tốt nhất phù hợp với truy vấn của người dùng sau khi sắp xếp lại. Yêu cầu cấu hình của API mô hình Rerank.',
},
- rerankModelRequired: 'Rerank model là bắt buộc',
+ rerankModelRequired: 'Mô hình Rerank là bắt buộc',
params: 'Tham số',
top_k: 'Top K',
- top_kTip: 'Sử dụng để lọc các phần chính xác nhất với các câu hỏi của người dùng. Hệ thống cũng sẽ tự động điều chỉnh giá trị của Top K, theo max_tokens của mô hình đã chọn.',
- score_threshold: 'Ngưỡng Điểm',
+ top_kTip: 'Sử dụng để lọc các phần chính xác nhất với câu hỏi của người dùng. Hệ thống cũng sẽ tự động điều chỉnh giá trị của Top K, theo max_tokens của mô hình đã chọn.',
+ score_threshold: 'Ngưỡng điểm',
score_thresholdTip: 'Sử dụng để thiết lập ngưỡng tương đồng cho việc lọc các phần.',
- retrieveChangeTip: 'Thay đổi chế độ chỉ mục và chế độ truy xuất có thể ảnh hưởng đến các ứng dụng liên quan đến Kiến thức này.',
+ retrieveChangeTip: 'Thay đổi chế độ chỉ mục và chế độ truy xuất có thể ảnh hưởng đến các ứng dụng liên quan đến kiến thức này.',
},
- debugAsSingleModel: 'Gỡ Lỗi như Một Mô hình',
- debugAsMultipleModel: 'Gỡ Lỗi như Nhiều Mô hình',
+ debugAsSingleModel: 'Gỡ lỗi như một mô hình',
+ debugAsMultipleModel: 'Gỡ lỗi như nhiều mô hình',
duplicateModel: 'Sao chép',
publishAs: 'Xuất bản dưới dạng',
assistantType: {
- name: 'Loại Trợ lý',
+ name: 'Loại trợ lý',
chatAssistant: {
- name: 'Trợ lý Cơ bản',
+ name: 'Trợ lý cơ bản',
description: 'Xây dựng một trợ lý dựa trên trò chuyện sử dụng một Mô hình Ngôn ngữ Lớn.',
},
agentAssistant: {
- name: 'Trợ lý Tác nhân',
- description: 'Xây dựng một Tác nhân thông minh có thể tự động chọn các công cụ để hoàn thành các nhiệm vụ.',
+ name: 'Trợ lý tác nhân',
+ description: 'Xây dựng một tác nhân thông minh có thể tự động chọn các công cụ để hoàn thành các nhiệm vụ.',
},
},
agent: {
- agentMode: 'Chế độ Tác nhân',
+ agentMode: 'Chế độ tác nhân',
agentModeDes: 'Thiết lập loại chế độ suy luận cho tác nhân',
agentModeType: {
ReACT: 'ReAct',
- functionCall: 'Gọi Hàm',
+ functionCall: 'Gọi hàm',
},
setting: {
- name: 'Thiết lập Tác nhân',
- description: 'Thiết lập Tác nhân cho phép thiết lập chế độ tác nhân và các tính năng nâng cao như các lời nhắc tích hợp sẵn, chỉ có sẵn trong loại Tác nhân.',
+ name: 'Thiết lập tác nhân',
+ description: 'Thiết lập tác nhân cho phép cấu hình chế độ tác nhân và các tính năng nâng cao như lời nhắc tích hợp sẵn, chỉ có sẵn trong loại Tác nhân.',
maximumIterations: {
- name: 'Số Lần Lặp Tối đa',
- description: 'Giới hạn số lần lặp một trợ lý tác nhân có thể thực hiện',
+ name: 'Số lần lặp tối đa',
+ description: 'Giới hạn số lần lặp mà một trợ lý tác nhân có thể thực hiện',
},
},
- buildInPrompt: 'Lời Nhắc Tích Hợp',
- firstPrompt: 'Tiền-lời Nhắc Đầu Tiên',
- nextIteration: 'Lần Lặp Tiếp Theo',
- promptPlaceholder: 'Viết tiền-lời nhắc của bạn ở đây',
+ buildInPrompt: 'Lời nhắc tích hợp',
+ firstPrompt: 'Lời nhắc đầu tiên',
+ nextIteration: 'Lần lặp tiếp theo',
+ promptPlaceholder: 'Viết lời nhắc của bạn ở đây',
tools: {
name: 'Công cụ',
- description: 'Sử dụng công cụ có thể mở rộng các khả năng của LLM, như tìm kiếm trên internet hoặc thực hiện các phép tính khoa học',
+ description: 'Sử dụng công cụ có thể mở rộng khả năng của LLM, như tìm kiếm trên internet hoặc thực hiện các phép tính khoa học',
enabled: 'Đã kích hoạt',
},
},
diff --git a/web/i18n/vi-VN/app-log.ts b/web/i18n/vi-VN/app-log.ts
index 193927c91d8b67..7440be61268795 100644
--- a/web/i18n/vi-VN/app-log.ts
+++ b/web/i18n/vi-VN/app-log.ts
@@ -5,85 +5,85 @@ const translation = {
table: {
header: {
time: 'Thời gian',
- endUser: 'Người Dùng Cuối',
- input: 'Đầu Vào',
- output: 'Đầu Ra',
- summary: 'Tiêu Đề',
- messageCount: 'Số Lượng Tin Nhắn',
- userRate: 'Tỷ Lệ Người Dùng',
- adminRate: 'Tỷ Lệ Quản Trị',
+ endUser: 'Người dùng cuối',
+ input: 'Đầu vào',
+ output: 'Đầu ra',
+ summary: 'Tóm tắt',
+ messageCount: 'Số lượng tin nhắn',
+ userRate: 'Đánh giá người dùng',
+ adminRate: 'Đánh giá quản trị viên',
startTime: 'THỜI GIAN BẮT ĐẦU',
status: 'TRẠNG THÁI',
runtime: 'THỜI GIAN CHẠY',
- tokens: 'MÃ',
+ tokens: 'TOKEN',
user: 'NGƯỜI DÙNG CUỐI',
version: 'PHIÊN BẢN',
},
pagination: {
previous: 'Trước',
- next: 'Tiếp',
+ next: 'Sau',
},
empty: {
noChat: 'Chưa có cuộc trò chuyện',
noOutput: 'Không có đầu ra',
element: {
- title: 'Có ai đó ở đó không?',
- content: 'Quan sát và chú thích các tương tác giữa người dùng cuối và ứng dụng trí tuệ nhân tạo ở đây để liên tục cải thiện độ chính xác của trí tuệ nhân tạo. Bạn có thể thử chia sẻ hoặc kiểm tra ứng dụng Web một cách tự nhiên, sau đó quay lại trang này.',
+ title: 'Chưa có dữ liệu',
+ content: 'Quan sát và chú thích các tương tác giữa người dùng cuối và ứng dụng trí tuệ nhân tạo ở đây để liên tục cải thiện độ chính xác của AI. Bạn có thể thử chia sẻ hoặc kiểm tra ứng dụng Web, sau đó quay lại trang này.',
},
},
},
detail: {
time: 'Thời gian',
- conversationId: 'ID Cuộc Trò Chuyện',
- promptTemplate: 'Mẫu Nhắc Nhở',
- promptTemplateBeforeChat: 'Mẫu Nhắc Nhở Trước Trò Chuyện · Như Tin Nhắn Hệ Thống',
- annotationTip: 'Cải Thiện Được Đánh Dấu bởi {{user}}',
+ conversationId: 'ID cuộc trò chuyện',
+ promptTemplate: 'Mẫu lời nhắc',
+ promptTemplateBeforeChat: 'Mẫu lời nhắc trước trò chuyện · Tin nhắn hệ thống',
+ annotationTip: 'Cải thiện được đánh dấu bởi {{user}}',
timeConsuming: '',
second: 'giây',
- tokenCost: 'Chi Phí Mã',
- loading: 'đang tải',
+ tokenCost: 'Chi phí token',
+ loading: 'Đang tải',
operation: {
- like: 'thích',
- dislike: 'không thích',
- addAnnotation: 'Thêm Cải Thiện',
- editAnnotation: 'Chỉnh Sửa Cải Thiện',
- annotationPlaceholder: 'Nhập câu trả lời mong muốn mà bạn muốn trí tuệ nhân tạo trả lời, điều này có thể được sử dụng để điều chỉnh mô hình và cải thiện liên tục chất lượng sinh văn bản trong tương lai.',
+ like: 'Thích',
+ dislike: 'Không thích',
+ addAnnotation: 'Thêm chú thích',
+ editAnnotation: 'Chỉnh sửa chú thích',
+ annotationPlaceholder: 'Nhập câu trả lời mong muốn từ AI. Điều này sẽ được sử dụng để điều chỉnh mô hình và cải thiện chất lượng sinh văn bản trong tương lai.',
},
variables: 'Biến',
- uploadImages: 'Ảnh Đã Tải Lên',
+ uploadImages: 'Ảnh đã tải lên',
},
filter: {
period: {
today: 'Hôm nay',
- last7days: '7 Ngày Qua',
- last4weeks: '4 Tuần Qua',
- last3months: '3 Tháng Qua',
- last12months: '12 Tháng Qua',
- monthToDate: 'Tháng Đến Hiện Tại',
- quarterToDate: 'Quý Đến Hiện Tại',
- yearToDate: 'Năm Đến Hiện Tại',
- allTime: 'Tất Cả Thời Gian',
+ last7days: '7 ngày qua',
+ last4weeks: '4 tuần qua',
+ last3months: '3 tháng qua',
+ last12months: '12 tháng qua',
+ monthToDate: 'Tháng hiện tại',
+ quarterToDate: 'Quý hiện tại',
+ yearToDate: 'Năm hiện tại',
+ allTime: 'Tất cả thời gian',
},
annotation: {
all: 'Tất cả',
- annotated: 'Cải Thiện Đã Đánh Dấu ({{count}} mục)',
- not_annotated: 'Chưa Đánh Dấu',
+ annotated: 'Đã chú thích ({{count}} mục)',
+ not_annotated: 'Chưa chú thích',
},
},
- workflowTitle: 'Nhật Ký Quy Trình Làm Việc',
- workflowSubtitle: 'Nhật ký ghi lại hoạt động của Tự Động Hóa.',
+ workflowTitle: 'Nhật ký quy trình làm việc',
+ workflowSubtitle: 'Nhật ký ghi lại hoạt động của Tự động hóa.',
runDetail: {
- title: 'Nhật Ký Cuộc Trò Chuyện',
- workflowTitle: 'Chi Tiết Nhật Ký',
+ title: 'Nhật ký cuộc trò chuyện',
+ workflowTitle: 'Chi tiết nhật ký',
},
- promptLog: 'Nhật Ký Nhắc Nhở',
- AgentLog: 'Nhật ký đại lý',
+ promptLog: 'Nhật ký lời nhắc',
+ AgentLog: 'Nhật ký tác nhân',
viewLog: 'Xem nhật ký',
agentLogDetail: {
- AgentMode: 'Chế độ đại lý',
- toolUsed: 'Công cụ được sử dụng',
- iterations: 'Lặp lại',
- iteration: 'Lặp lại',
+ AgentMode: 'Chế độ tác nhân',
+ toolUsed: 'Công cụ đã sử dụng',
+ iterations: 'Số lần lặp',
+ iteration: 'Lần lặp',
finalProcessing: 'Xử lý cuối cùng',
},
}
diff --git a/web/i18n/vi-VN/app-overview.ts b/web/i18n/vi-VN/app-overview.ts
index c023850d7160a6..55a53d73a27047 100644
--- a/web/i18n/vi-VN/app-overview.ts
+++ b/web/i18n/vi-VN/app-overview.ts
@@ -2,153 +2,153 @@ const translation = {
welcome: {
firstStepTip: 'Để bắt đầu,',
enterKeyTip: 'nhập khóa API OpenAI của bạn bên dưới',
- getKeyTip: 'Lấy API Key của bạn từ bảng điều khiển OpenAI',
- placeholder: 'Khóa API OpenAI của bạn (vd. sk-xxxx)',
+ getKeyTip: 'Lấy khóa API từ bảng điều khiển OpenAI',
+ placeholder: 'Khóa API OpenAI của bạn (ví dụ: sk-xxxx)',
},
apiKeyInfo: {
cloud: {
trial: {
- title: 'Bạn đang sử dụng hạn mức thử nghiệm của {{providerName}}.',
- description: 'Hạn mức thử nghiệm được cung cấp cho việc kiểm tra của bạn. Trước khi hết lượt gọi hạn mức thử nghiệm, vui lòng thiết lập nhà cung cấp mô hình của riêng bạn hoặc mua thêm hạn mức.',
+ title: 'Bạn đang sử dụng gói dùng thử của {{providerName}}.',
+ description: 'Gói dùng thử được cung cấp để bạn kiểm tra. Trước khi hết lượt gọi của gói dùng thử, vui lòng thiết lập nhà cung cấp mô hình riêng hoặc mua thêm hạn mức.',
},
exhausted: {
- title: 'Hạn mức thử nghiệm của bạn đã được sử dụng hết, vui lòng thiết lập APIKey của bạn.',
- description: 'Hạn mức thử nghiệm của bạn đã được sử dụng hết. Vui lòng thiết lập nhà cung cấp mô hình của riêng bạn hoặc mua thêm hạn mức.',
+ title: 'Gói dùng thử của bạn đã hết, vui lòng thiết lập khóa API của bạn.',
+ description: 'Gói dùng thử của bạn đã hết. Vui lòng thiết lập nhà cung cấp mô hình riêng hoặc mua thêm hạn mức.',
},
},
selfHost: {
title: {
row1: 'Để bắt đầu,',
- row2: 'thiết lập nhà cung cấp mô hình của bạn trước.',
+ row2: 'hãy thiết lập nhà cung cấp mô hình của bạn trước.',
},
},
callTimes: 'Số lần gọi',
usedToken: 'Token đã sử dụng',
- setAPIBtn: 'Đi đến thiết lập nhà cung cấp mô hình',
- tryCloud: 'Hoặc thử phiên bản điện toán đám mây của Dify với báo giá miễn phí',
+ setAPIBtn: 'Thiết lập nhà cung cấp mô hình',
+ tryCloud: 'Hoặc dùng thử phiên bản đám mây của Dify với gói miễn phí',
},
overview: {
title: 'Tổng quan',
appInfo: {
explanation: 'Ứng dụng web AI sẵn sàng sử dụng',
- accessibleAddress: 'Địa chỉ công cộng',
+ accessibleAddress: 'Địa chỉ công khai',
preview: 'Xem trước',
regenerate: 'Tạo lại',
- regenerateNotice: 'Bạn có muốn tạo lại địa chỉ công cộng không?',
+ regenerateNotice: 'Bạn có muốn tạo lại địa chỉ công khai không?',
preUseReminder: 'Vui lòng kích hoạt ứng dụng web trước khi tiếp tục.',
settings: {
entry: 'Cài đặt',
title: 'Cài đặt ứng dụng web',
webName: 'Tên ứng dụng web',
webDesc: 'Mô tả ứng dụng web',
- webDescTip: 'Văn bản này sẽ được hiển thị ở phía máy khách, cung cấp hướng dẫn cơ bản về cách sử dụng ứng dụng',
+ webDescTip: 'Văn bản này sẽ hiển thị ở phía người dùng, cung cấp hướng dẫn cơ bản về cách sử dụng ứng dụng',
webDescPlaceholder: 'Nhập mô tả của ứng dụng web',
language: 'Ngôn ngữ',
workflow: {
- title: 'Các Bước Quy trình',
+ title: 'Các bước quy trình',
show: 'Hiển thị',
hide: 'Ẩn',
},
- chatColorTheme: 'Chủ đề màu sắc trò chuyện',
- chatColorThemeDesc: 'Thiết lập chủ đề màu sắc của chatbot',
+ chatColorTheme: 'Giao diện màu trò chuyện',
+ chatColorThemeDesc: 'Thiết lập giao diện màu của chatbot',
chatColorThemeInverted: 'Đảo ngược',
- invalidHexMessage: 'Giá trị không hợp lệ của hệ màu hex',
+ invalidHexMessage: 'Giá trị mã màu không hợp lệ',
more: {
entry: 'Hiển thị thêm cài đặt',
copyright: 'Bản quyền',
copyRightPlaceholder: 'Nhập tên tác giả hoặc tổ chức',
privacyPolicy: 'Chính sách bảo mật',
privacyPolicyPlaceholder: 'Nhập liên kết chính sách bảo mật',
- privacyPolicyTip: 'Giúp khách truy cập hiểu được dữ liệu mà ứng dụng thu thập, xem Chính sách bảo mật của Dify.',
- customDisclaimer: 'Tùy chỉnh từ chối trách nhiệm',
- customDisclaimerPlaceholder: 'Nhập liên kết từ chối trách nhiệm',
- customDisclaimerTip: 'Liên kết này sẽ được hiển thị ở phía máy khách, cung cấp thông tin về trách nhiệm của ứng dụng',
+ privacyPolicyTip: 'Giúp người dùng hiểu dữ liệu mà ứng dụng thu thập, xem Chính sách bảo mật của Dify.',
+ customDisclaimer: 'Tuyên bố từ chối trách nhiệm tùy chỉnh',
+ customDisclaimerPlaceholder: 'Nhập liên kết tuyên bố từ chối trách nhiệm',
+ customDisclaimerTip: 'Liên kết này sẽ hiển thị ở phía người dùng, cung cấp thông tin về trách nhiệm của ứng dụng',
},
},
embedded: {
entry: 'Nhúng',
title: 'Nhúng vào trang web',
explanation: 'Chọn cách nhúng ứng dụng trò chuyện vào trang web của bạn',
- iframe: 'Để thêm ứng dụng trò chuyện ở bất kỳ đâu trên trang web của bạn, hãy thêm iframe này vào mã HTML của bạn.',
- scripts: 'Để thêm ứng dụng trò chuyện vào góc dưới bên phải của trang web của bạn, thêm mã này vào mã HTML của bạn.',
- chromePlugin: 'Cài đặt Tiện ích Mở rộng Dify Chatbot cho Chrome',
+ iframe: 'Để thêm ứng dụng trò chuyện vào bất kỳ đâu trên trang web của bạn, hãy thêm iframe này vào mã HTML của bạn.',
+ scripts: 'Để thêm ứng dụng trò chuyện vào góc dưới bên phải của trang web, thêm mã này vào mã HTML của bạn.',
+ chromePlugin: 'Cài đặt tiện ích mở rộng Dify Chatbot cho Chrome',
copied: 'Đã sao chép',
copy: 'Sao chép',
},
qrcode: {
title: 'Mã QR để chia sẻ',
- scan: 'Quét và Chia sẻ Ứng dụng',
- download: 'Tải xuống Mã QR',
+ scan: 'Quét và chia sẻ ứng dụng',
+ download: 'Tải xuống mã QR',
},
customize: {
way: 'cách',
entry: 'Tùy chỉnh',
title: 'Tùy chỉnh ứng dụng web AI',
- explanation: 'Bạn có thể tùy chỉnh giao diện phía trước của Ứng dụng Web để phù hợp với kịch bản và nhu cầu phong cách của mình.',
+ explanation: 'Bạn có thể tùy chỉnh giao diện của ứng dụng web để phù hợp với kịch bản và phong cách mong muốn.',
way1: {
- name: 'Fork mã nguồn của máy khách, sửa đổi và triển khai lên Vercel (được khuyến nghị)',
- step1: 'Fork mã nguồn của máy khách và sửa đổi',
- step1Tip: 'Nhấp vào đây để fork mã nguồn vào tài khoản GitHub của bạn và sửa đổi mã',
+ name: 'Fork mã nguồn phía client, sửa đổi và triển khai lên Vercel (khuyến nghị)',
+ step1: 'Fork mã nguồn phía client và sửa đổi',
+ step1Tip: 'Nhấp vào đây để fork mã nguồn vào tài khoản GitHub của bạn và sửa đổi',
step1Operation: 'Dify-WebClient',
step2: 'Triển khai lên Vercel',
- step2Tip: 'Nhấp vào đây để nhập kho vào Vercel và triển khai',
- step2Operation: 'Nhập kho',
+ step2Tip: 'Nhấp vào đây để nhập kho lưu trữ vào Vercel và triển khai',
+ step2Operation: 'Nhập kho lưu trữ',
step3: 'Cấu hình biến môi trường',
step3Tip: 'Thêm các biến môi trường sau vào Vercel',
},
way2: {
- name: 'Viết mã phía máy khách để gọi API và triển khai lên một máy chủ',
+ name: 'Viết mã phía client để gọi API và triển khai lên máy chủ',
operation: 'Tài liệu',
},
},
},
apiInfo: {
- title: 'API Dịch vụ Backend',
+ title: 'API dịch vụ backend',
explanation: 'Dễ dàng tích hợp vào ứng dụng của bạn',
- accessibleAddress: 'Điểm cuối Dịch vụ API',
+ accessibleAddress: 'Điểm cuối dịch vụ API',
doc: 'Tài liệu tham khảo API',
},
status: {
running: 'Đang hoạt động',
- disable: 'Tắt',
+ disable: 'Đã tắt',
},
},
analysis: {
title: 'Phân tích',
ms: 'ms',
- tokenPS: 'Token/s',
+ tokenPS: 'Token/giây',
totalMessages: {
- title: 'Tổng Số Tin Nhắn',
- explanation: 'Số lần tương tác AI hàng ngày; không tính việc kỹ thuật hóa/nhái lại câu hỏi.',
+ title: 'Tổng số tin nhắn',
+ explanation: 'Số lần tương tác AI hàng ngày; không tính việc tạo lại/lặp lại câu hỏi.',
},
activeUsers: {
- title: 'Người Dùng Hoạt Động',
- explanation: 'Người dùng duy nhất tham gia trò chuyện với AI; không tính việc kỹ thuật hóa/nhái lại câu hỏi.',
+ title: 'Người dùng hoạt động',
+ explanation: 'Số người dùng duy nhất tham gia trò chuyện với AI; không tính việc tạo lại/lặp lại câu hỏi.',
},
tokenUsage: {
- title: 'Sử Dụng Token',
- explanation: 'Phản ánh việc sử dụng hàng ngày của mô hình ngôn ngữ cho ứng dụng, hữu ích cho mục đích kiểm soát chi phí.',
- consumed: 'Đã Sử Dụng',
+ title: 'Sử dụng token',
+ explanation: 'Phản ánh việc sử dụng mô hình ngôn ngữ hàng ngày cho ứng dụng, hữu ích cho mục đích kiểm soát chi phí.',
+ consumed: 'Đã sử dụng',
},
avgSessionInteractions: {
- title: 'Trung Bình Tương Tác Phiên',
- explanation: 'Số lần giao tiếp liên tục giữa người dùng và AI; cho các ứng dụng dựa trên cuộc trò chuyện.',
+ title: 'Trung bình tương tác mỗi phiên',
+ explanation: 'Số lần giao tiếp liên tục giữa người dùng và AI; áp dụng cho các ứng dụng dựa trên trò chuyện.',
},
avgUserInteractions: {
- title: 'Trung Bình Tương Tác Người Dùng',
- explanation: 'Phản ánh tần suất sử dụng hàng ngày của người dùng. Số liệu này phản ánh sự kết dính của người dùng.',
+ title: 'Trung bình tương tác mỗi người dùng',
+ explanation: 'Phản ánh tần suất sử dụng hàng ngày của người dùng. Chỉ số này cho biết mức độ gắn kết của người dùng.',
},
userSatisfactionRate: {
- title: 'Tỷ Lệ Hài Lòng của Người Dùng',
- explanation: 'Số lượt thích trên mỗi 1.000 tin nhắn. Điều này cho biết tỷ lệ câu trả lời mà người dùng rất hài lòng.',
+ title: 'Tỷ lệ hài lòng của người dùng',
+ explanation: 'Số lượt thích trên mỗi 1.000 tin nhắn. Chỉ số này cho biết tỷ lệ câu trả lời mà người dùng rất hài lòng.',
},
avgResponseTime: {
- title: 'Thời Gian Trả Lời Trung Bình',
- explanation: 'Thời gian (ms) để AI xử lý/phản hồi; cho các ứng dụng dựa trên văn bản.',
+ title: 'Thời gian phản hồi trung bình',
+ explanation: 'Thời gian (ms) để AI xử lý/phản hồi; áp dụng cho các ứng dụng dựa trên văn bản.',
},
tps: {
- title: 'Tốc Độ Đầu Ra Token',
- explanation: 'Đo lường hiệu suất của LLM. Đếm tốc độ đầu ra Token của LLM từ khi bắt đầu yêu cầu đến khi hoàn thành đầu ra.',
+ title: 'Tốc độ đầu ra token',
+ explanation: 'Đo lường hiệu suất của LLM. Đếm tốc độ đầu ra token của LLM từ khi bắt đầu yêu cầu đến khi hoàn thành đầu ra.',
},
},
}
diff --git a/web/i18n/vi-VN/app.ts b/web/i18n/vi-VN/app.ts
index ca7eedc434813d..c8f3167d7c6fd0 100644
--- a/web/i18n/vi-VN/app.ts
+++ b/web/i18n/vi-VN/app.ts
@@ -3,19 +3,19 @@ const translation = {
types: {
all: 'Tất cả',
chatbot: 'Chatbot',
- agent: 'Đại lý',
+ agent: 'Tác nhân',
workflow: 'Quy trình',
completion: 'Hoàn thành',
},
duplicate: 'Sao chép',
- duplicateTitle: 'Sao chép Ứng dụng',
+ duplicateTitle: 'Sao chép ứng dụng',
export: 'Xuất DSL',
exportFailed: 'Xuất DSL thất bại.',
importDSL: 'Nhập tệp DSL',
createFromConfigFile: 'Tạo từ tệp DSL',
deleteAppConfirmTitle: 'Xóa ứng dụng này?',
deleteAppConfirmContent:
- 'Việc xóa ứng dụng là không thể đảo ngược. Người dùng sẽ không thể truy cập vào ứng dụng của bạn nữa và tất cả cấu hình và nhật ký nhắc sẽ bị xóa vĩnh viễn.',
+ 'Việc xóa ứng dụng là không thể hoàn tác. Người dùng sẽ không thể truy cập vào ứng dụng của bạn nữa và tất cả cấu hình cũng như nhật ký nhắc sẽ bị xóa vĩnh viễn.',
appDeleted: 'Ứng dụng đã bị xóa',
appDeleteFailed: 'Không thể xóa ứng dụng',
join: 'Tham gia cộng đồng',
@@ -23,23 +23,23 @@ const translation = {
'Thảo luận với các thành viên nhóm, người đóng góp và nhà phát triển trên các kênh khác nhau.',
roadmap: 'Xem lộ trình của chúng tôi',
newApp: {
- startFromBlank: 'Tạo từ Trắng',
- startFromTemplate: 'Tạo từ Mẫu',
- captionAppType: 'Loại ứng dụng bạn muốn tạo?',
- chatbotDescription: 'Xây dựng một ứng dụng dựa trên trò chuyện. Ứng dụng này sử dụng định dạng câu hỏi và trả lời, cho phép nhiều vòng trò chuyện liên tục.',
- completionDescription: 'Xây dựng một ứng dụng tạo văn bản chất lượng cao dựa trên các gợi ý, như tạo bài báo, tóm tắt, dịch và nhiều hơn nữa.',
- completionWarning: 'Loại ứng dụng này sẽ không được hỗ trợ nữa.',
- agentDescription: 'Xây dựng một Đại lý thông minh có thể tự động chọn công cụ để hoàn thành các nhiệm vụ',
- workflowDescription: 'Xây dựng một ứng dụng tạo văn bản chất lượng cao dựa trên quy trình làm việc với mức độ tùy chỉnh cao. Nó phù hợp cho người dùng có kinh nghiệm.',
+ startFromBlank: 'Tạo mới',
+ startFromTemplate: 'Tạo từ mẫu',
+ captionAppType: 'Bạn muốn tạo loại ứng dụng nào?',
+ chatbotDescription: 'Xây dựng một ứng dụng trò chuyện. Ứng dụng này sử dụng định dạng hỏi đáp, cho phép nhiều vòng trò chuyện liên tục.',
+ completionDescription: 'Xây dựng một ứng dụng tạo văn bản chất lượng cao dựa trên gợi ý, như tạo bài viết, tóm tắt, dịch thuật và nhiều hơn nữa.',
+ completionWarning: 'Loại ứng dụng này sẽ không được hỗ trợ trong tương lai.',
+ agentDescription: 'Xây dựng một tác nhân thông minh có thể tự động chọn công cụ để hoàn thành các nhiệm vụ',
+ workflowDescription: 'Xây dựng một ứng dụng tạo văn bản chất lượng cao dựa trên quy trình làm việc với mức độ tùy chỉnh cao. Phù hợp cho người dùng có kinh nghiệm.',
workflowWarning: 'Hiện đang trong phiên bản beta',
chatbotType: 'Phương pháp quản lý Chatbot',
basic: 'Cơ bản',
basicTip: 'Dành cho người mới bắt đầu, có thể chuyển sang Chatflow sau này',
basicFor: 'DÀNH CHO NGƯỜI MỚI BẮT ĐẦU',
- basicDescription: 'Quản lý Cơ bản cho phép quản lý ứng dụng Chatbot bằng cách sử dụng các cài đặt đơn giản, mà không cần phải sửa đổi các nhắc nhở tích hợp sẵn. Nó phù hợp cho người mới bắt đầu.',
+ basicDescription: 'Quản lý cơ bản cho phép quản lý ứng dụng Chatbot bằng cách sử dụng các cài đặt đơn giản, không cần sửa đổi các lời nhắc tích hợp sẵn. Phù hợp cho người mới bắt đầu.',
advanced: 'Chatflow',
advancedFor: 'Dành cho người dùng có kinh nghiệm',
- advancedDescription: 'Quản lý Quy trình quản lý Chatbot dưới dạng các quy trình làm việc, cung cấp mức độ tùy chỉnh cao, bao gồm khả năng chỉnh sửa các nhắc nhở tích hợp sẵn. Nó phù hợp cho người dùng có kinh nghiệm.',
+ advancedDescription: 'Quản lý Chatbot dưới dạng các quy trình làm việc, cung cấp mức độ tùy chỉnh cao, bao gồm khả năng chỉnh sửa các lời nhắc tích hợp sẵn. Phù hợp cho người dùng có kinh nghiệm.',
captionName: 'Biểu tượng và tên ứng dụng',
appNamePlaceholder: 'Đặt tên cho ứng dụng của bạn',
captionDescription: 'Mô tả',
@@ -48,11 +48,11 @@ const translation = {
previewDemo: 'Xem trước demo',
chatApp: 'Trợ lý',
chatAppIntro:
- 'Tôi muốn xây dựng một ứng dụng dựa trên trò chuyện. Ứng dụng này sử dụng định dạng câu hỏi và trả lời, cho phép nhiều vòng trò chuyện liên tục.',
- agentAssistant: 'Trợ lý Đại lý mới',
+ 'Tôi muốn xây dựng một ứng dụng trò chuyện. Ứng dụng này sử dụng định dạng hỏi đáp, cho phép nhiều vòng trò chuyện liên tục.',
+ agentAssistant: 'Trợ lý tác nhân mới',
completeApp: 'Máy tạo văn bản',
completeAppIntro:
- 'Tôi muốn tạo một ứng dụng tạo văn bản chất lượng cao dựa trên các gợi ý, như tạo bài báo, tóm tắt, dịch và nhiều hơn nữa.',
+ 'Tôi muốn tạo một ứng dụng tạo văn bản chất lượng cao dựa trên gợi ý, như tạo bài viết, tóm tắt, dịch thuật và nhiều hơn nữa.',
showTemplates: 'Tôi muốn chọn từ mẫu',
hideTemplates: 'Quay lại chế độ lựa chọn',
Create: 'Tạo',
@@ -63,28 +63,64 @@ const translation = {
appCreated: 'Ứng dụng đã được tạo',
appCreateFailed: 'Không thể tạo ứng dụng',
},
- editApp: 'Chỉnh sửa Thông tin',
- editAppTitle: 'Chỉnh sửa Thông tin Ứng dụng',
+ editApp: 'Chỉnh sửa thông tin',
+ editAppTitle: 'Chỉnh sửa thông tin ứng dụng',
editDone: 'Thông tin ứng dụng đã được cập nhật',
editFailed: 'Không thể cập nhật thông tin ứng dụng',
emoji: {
ok: 'Đồng ý',
cancel: 'Hủy',
},
- switch: 'Chuyển sang Quy trình Quản lý',
- switchTipStart: 'Một bản sao ứng dụng mới sẽ được tạo cho bạn và bản sao mới sẽ chuyển sang Quy trình Quản lý. Bản sao mới sẽ ',
- switchTip: 'không cho phép',
- switchTipEnd: ' chuyển lại Cơ bản Quản lý.',
+ switch: 'Chuyển sang quản lý quy trình',
+ switchTipStart: 'Một bản sao ứng dụng mới sẽ được tạo và chuyển sang quản lý quy trình. Bản sao mới sẽ ',
+ switchTip: 'không thể',
+ switchTipEnd: ' chuyển lại quản lý cơ bản.',
switchLabel: 'Bản sao ứng dụng sẽ được tạo',
removeOriginal: 'Xóa ứng dụng gốc',
switchStart: 'Bắt đầu chuyển',
typeSelector: {
- all: 'TẤT CẢ Loại',
+ all: 'Tất cả loại',
chatbot: 'Chatbot',
- agent: 'Đại lý',
+ agent: 'Tác nhân',
workflow: 'Quy trình',
completion: 'Hoàn thành',
},
+ tracing: {
+ title: 'Theo dõi hiệu suất ứng dụng',
+ description: 'Cấu hình nhà cung cấp LLMOps bên thứ ba và theo dõi hiệu suất ứng dụng.',
+ config: 'Cấu hình',
+ collapse: 'Thu gọn',
+ expand: 'Mở rộng',
+ tracing: 'Theo dõi',
+ disabled: 'Đã tắt',
+ disabledTip: 'Vui lòng cấu hình nhà cung cấp trước',
+ enabled: 'Đang hoạt động',
+ tracingDescription: 'Ghi lại toàn bộ ngữ cảnh thực thi ứng dụng, bao gồm các cuộc gọi LLM, ngữ cảnh, lời nhắc, yêu cầu HTTP và nhiều hơn nữa, đến một nền tảng theo dõi của bên thứ ba.',
+ configProviderTitle: {
+ configured: 'Đã cấu hình',
+ notConfigured: 'Cấu hình nhà cung cấp để bật theo dõi',
+ moreProvider: 'Thêm nhà cung cấp',
+ },
+ langsmith: {
+ title: 'LangSmith',
+ description: 'Nền tảng phát triển tất cả trong một cho mọi bước của vòng đời ứng dụng được hỗ trợ bởi LLM.',
+ },
+ langfuse: {
+ title: 'Langfuse',
+ description: 'Theo dõi, đánh giá, quản lý lời nhắc và số liệu để gỡ lỗi và cải thiện ứng dụng LLM của bạn.',
+ },
+ inUse: 'Đang sử dụng',
+ configProvider: {
+ title: 'Cấu hình ',
+ placeholder: 'Nhập {{key}} của bạn',
+ project: 'Dự án',
+ publicKey: 'Khóa công khai',
+ secretKey: 'Khóa bí mật',
+ viewDocsLink: 'Xem tài liệu {{key}}',
+ removeConfirmTitle: 'Xóa cấu hình {{key}}?',
+ removeConfirmContent: 'Cấu hình hiện tại đang được sử dụng, việc xóa nó sẽ tắt tính năng Theo dõi.',
+ },
+ },
}
export default translation
diff --git a/web/i18n/vi-VN/common.ts b/web/i18n/vi-VN/common.ts
index ff0172fafca27a..80570861b54778 100644
--- a/web/i18n/vi-VN/common.ts
+++ b/web/i18n/vi-VN/common.ts
@@ -12,6 +12,7 @@ const translation = {
cancel: 'Hủy bỏ',
clear: 'Xóa',
save: 'Lưu',
+ saveAndEnable: 'Lưu & Kích hoạt',
edit: 'Chỉnh sửa',
add: 'Thêm',
added: 'Đã thêm',
@@ -77,7 +78,7 @@ const translation = {
params: {
temperature: 'Nhiệt độ',
temperatureTip:
- 'Kiểm soát sự ngẫu nhiên: Giảm nhiệt độ dẫn đến ít kết quả hoàn thành ngẫu nhiên hơn. Khi nhiệt độ tiến gần về không, mô hình sẽ trở nên xác định và lặp lại.',
+ 'Kiểm soát độ ngẫu nhiên: Giảm nhiệt độ dẫn đến ít kết quả ngẫu nhiên hơn. Khi nhiệt độ gần bằng 0, mô hình sẽ trở nên xác định và lặp lại.',
top_p: 'Top P',
top_pTip:
'Kiểm soát đa dạng thông qua lấy mẫu nhân nhóm: 0.5 có nghĩa là nửa số tùy chọn có khả năng cao được xem xét.',
@@ -111,7 +112,7 @@ const translation = {
plugins: 'Plugins',
pluginsTips: 'Tích hợp các plugin bên thứ ba hoặc tạo ra các AI-Plugin tương thích với ChatGPT.',
datasets: 'Kiến thức',
- datasetsTips: 'SẮP RA MẮT: Nhập dữ liệu văn bản của bạn hoặc viết dữ liệu theo thời gian thực thông qua Webhook để cải thiện ngữ cảnh LLM.',
+ datasetsTips: 'SẮP RA MẮT: Nhập dữ liệu văn bản của bạn hoặc cập nhật dữ liệu theo thời gian thực thông qua Webhook để cải thiện ngữ cảnh LLM.',
newApp: 'Ứng dụng mới',
newDataset: 'Tạo Kiến thức',
tools: 'Công cụ',
@@ -128,8 +129,8 @@ const translation = {
logout: 'Đăng xuất',
},
settings: {
- accountGroup: 'ACCOUNT',
- workplaceGroup: 'WORKSPACE',
+ accountGroup: 'TÀI KHOẢN',
+ workplaceGroup: 'KHÔNG GIAN LÀM VIỆC',
account: 'Tài khoản của tôi',
members: 'Thành viên',
billing: 'Thanh toán',
@@ -173,7 +174,7 @@ const translation = {
normal: 'Bình thường',
normalTip: 'Chỉ có thể sử dụng ứng dụng, không thể xây dựng ứng dụng',
editor: 'Biên tập viên',
- editorTip: 'Chỉ có thể xây dựng ứng dụng, không thể quản lý cài đặt nhóm',
+ editorTip: 'Có thể xây dựng ứng dụng, không thể quản lý cài đặt nhóm',
inviteTeamMember: 'Mời thành viên nhóm',
inviteTeamMemberTip: 'Sau khi đăng nhập, họ có thể truy cập trực tiếp vào dữ liệu nhóm của bạn.',
email: 'Email',
@@ -228,7 +229,7 @@ const translation = {
},
openaiHosted: {
openaiHosted: 'OpenAI đang lưu trữ',
- onTrial: 'TRIÊN DÙNG THỬ',
+ onTrial: 'DÙNG THỬ',
exhausted: 'HẾT QUOTA',
desc: 'Dịch vụ lưu trữ OpenAI được cung cấp bởi Dify cho phép bạn sử dụng các mô hình như GPT-3.5. Trước khi hết lượng truy vấn dùng thử, bạn cần thiết lập các nhà cung cấp mô hình khác.',
callTimes: 'Số lần gọi',
@@ -238,7 +239,7 @@ const translation = {
},
anthropicHosted: {
anthropicHosted: 'Anthropic Claude',
- onTrial: 'TRIÊN DÙNG THỬ',
+ onTrial: 'DÙNG THỬ',
exhausted: 'HẾT QUOTA',
desc: 'Mô hình mạnh mẽ, vượt trội trong một loạt các nhiệm vụ từ trò chuyện phức tạp và tạo nội dung sáng tạo đến hướng dẫn chi tiết.',
callTimes: 'Số lần gọi',
@@ -265,7 +266,7 @@ const translation = {
setupModelFirst: 'Vui lòng thiết lập mô hình của bạn trước',
systemReasoningModel: {
key: 'Mô hình lập luận hệ thống',
- tip: 'Thiết lập mô hình suy luận mặc định sẽ được sử dụng để tạo ứng dụng, cũng như các tính năng như việc tạo tên cuộc trò chuyện và đề xuất câu hỏi tiếp theo cũng sẽ sử dụng mô hình suy luận mặc định.',
+ tip: 'Thiết lập mô hình suy luận mặc định sẽ được sử dụng để tạo ứng dụng. Các tính năng như tạo tên cuộc trò chuyện và đề xuất câu hỏi tiếp theo cũng sẽ sử dụng mô hình suy luận mặc định này.',
},
embeddingModel: {
key: 'Mô hình nhúng',
@@ -407,7 +408,7 @@ const translation = {
latestAvailable: 'Dify {{version}} là phiên bản mới nhất hiện có.',
},
appMenus: {
- overview: 'Tổng quan',
+ overview: 'Giám sát',
promptEng: 'Orchestrate',
apiAccess: 'Truy cập API',
logAndAnn: 'Nhật ký & Thông báo',
diff --git a/web/i18n/vi-VN/custom.ts b/web/i18n/vi-VN/custom.ts
index b36476b62ab0cd..6c91e519b0cccc 100644
--- a/web/i18n/vi-VN/custom.ts
+++ b/web/i18n/vi-VN/custom.ts
@@ -2,26 +2,26 @@ const translation = {
custom: 'Tùy chỉnh',
upgradeTip: {
prefix: 'Nâng cấp gói của bạn để',
- suffix: 'tùy chỉnh thương hiệu của bạn.',
+ suffix: 'tùy chỉnh thương hiệu.',
},
webapp: {
title: 'Tùy chỉnh thương hiệu WebApp',
- removeBrand: 'Xóa Được hỗ trợ bởi Dify',
- changeLogo: 'Thay đổi Hình ảnh Thương hiệu Được hỗ trợ bởi',
- changeLogoTip: 'Định dạng SVG hoặc PNG với kích thước tối thiểu là 40x40px',
+ removeBrand: 'Xóa "Được hỗ trợ bởi Dify"',
+ changeLogo: 'Thay đổi logo "Được hỗ trợ bởi"',
+ changeLogoTip: 'Định dạng SVG hoặc PNG với kích thước tối thiểu 40x40px',
},
app: {
title: 'Tùy chỉnh thương hiệu tiêu đề ứng dụng',
- changeLogoTip: 'Định dạng SVG hoặc PNG với kích thước tối thiểu là 80x80px',
+ changeLogoTip: 'Định dạng SVG hoặc PNG với kích thước tối thiểu 80x80px',
},
upload: 'Tải lên',
uploading: 'Đang tải lên',
- uploadedFail: 'Tải ảnh lên thất bại, vui lòng tải lên lại.',
+ uploadedFail: 'Tải ảnh lên thất bại, vui lòng thử lại.',
change: 'Thay đổi',
apply: 'Áp dụng',
- restore: 'Khôi phục Mặc định',
+ restore: 'Khôi phục mặc định',
customize: {
- contactUs: ' liên hệ với chúng tôi ',
+ contactUs: ' liên hệ với chúng tôi ',
prefix: 'Để tùy chỉnh logo thương hiệu trong ứng dụng, vui lòng',
suffix: 'để nâng cấp lên phiên bản Doanh nghiệp.',
},
diff --git a/web/i18n/vi-VN/dataset-creation.ts b/web/i18n/vi-VN/dataset-creation.ts
index b29cd17c0b35ca..23b210d177e416 100644
--- a/web/i18n/vi-VN/dataset-creation.ts
+++ b/web/i18n/vi-VN/dataset-creation.ts
@@ -22,7 +22,7 @@ const translation = {
uploader: {
title: 'Tải lên tệp văn bản',
button: 'Kéo và thả tệp, hoặc',
- browse: 'Duyệt',
+ browse: 'Chọn tệp',
tip: 'Hỗ trợ {{supportTypes}}. Tối đa {{size}}MB mỗi tệp.',
validation: {
typeError: 'Loại tệp không được hỗ trợ',
@@ -30,7 +30,7 @@ const translation = {
count: 'Không hỗ trợ tải lên nhiều tệp',
filesNumber: 'Bạn đã đạt đến giới hạn tải lên lô của {{filesNumber}} tệp.',
},
- cancel: 'Hủy bỏ',
+ cancel: 'Hủy',
change: 'Thay đổi',
failed: 'Tải lên thất bại',
},
@@ -46,56 +46,56 @@ const translation = {
placeholder: 'Vui lòng nhập',
nameNotEmpty: 'Tên không thể để trống',
nameLengthInvaild: 'Tên phải từ 1 đến 40 ký tự',
- cancelButton: 'Hủy bỏ',
+ cancelButton: 'Hủy',
confirmButton: 'Tạo',
failed: 'Tạo thất bại',
},
},
stepTwo: {
- segmentation: 'Cài đặt đoạn',
+ segmentation: 'Cài đặt phân đoạn',
auto: 'Tự động',
- autoDescription: 'Tự động thiết lập quy tắc đoạn và tiền xử lý. Người dùng không quen thuộc được khuyến nghị chọn điều này.',
+ autoDescription: 'Tự động thiết lập quy tắc phân đoạn và tiền xử lý. Khuyến nghị cho người dùng mới.',
custom: 'Tùy chỉnh',
- customDescription: 'Tùy chỉnh quy tắc đoạn, độ dài đoạn và quy tắc tiền xử lý, v.v.',
- separator: 'Bộ phận xác định đoạn',
- separatorPlaceholder: 'Ví dụ, dòng mới (\\\\n) hoặc bộ phận phân cách đặc biệt (như "***")',
+ customDescription: 'Tùy chỉnh quy tắc phân đoạn, độ dài đoạn và quy tắc tiền xử lý, v.v.',
+ separator: 'Ký tự phân đoạn',
+ separatorPlaceholder: 'Ví dụ, dòng mới (\\\\n) hoặc ký tự đặc biệt (như "***")',
maxLength: 'Độ dài tối đa của đoạn',
- overlap: 'Chồng lấn đoạn',
- overlapTip: 'Thiết lập chồng lấn đoạn có thể duy trì sự liên quan ngữ nghĩa giữa chúng, tăng cường hiệu ứng truy xuất. Đề xuất thiết lập từ 10% đến 25% của kích thước đoạn tối đa.',
- overlapCheck: 'Chồng lấn đoạn không nên lớn hơn độ dài tối đa của đoạn',
+ overlap: 'Độ chồng lấp đoạn',
+ overlapTip: 'Thiết lập chồng lấp đoạn có thể duy trì sự liên quan ngữ nghĩa giữa chúng, tăng cường hiệu quả truy xuất. Đề xuất thiết lập từ 10% đến 25% của kích thước đoạn tối đa.',
+ overlapCheck: 'Độ chồng lấp đoạn không nên lớn hơn độ dài tối đa của đoạn',
rules: 'Quy tắc tiền xử lý văn bản',
removeExtraSpaces: 'Thay thế khoảng trắng liên tục, dòng mới và tab',
removeUrlEmails: 'Xóa tất cả URL và địa chỉ email',
- removeStopwords: 'Loại bỏ các từ dừng như "một", "một", "những"',
+ removeStopwords: 'Loại bỏ các từ dừng như "một", "và", "những"',
preview: 'Xác nhận & Xem trước',
reset: 'Đặt lại',
- indexMode: 'Chế độ chỉ số',
+ indexMode: 'Chế độ chỉ mục',
qualified: 'Chất lượng cao',
recommend: 'Khuyến nghị',
- qualifiedTip: 'Gọi giao diện nhúng hệ thống mặc định để xử lý để cung cấp độ chính xác cao hơn khi người dùng truy vấn.',
+ qualifiedTip: 'Sử dụng giao diện nhúng hệ thống mặc định để xử lý, cung cấp độ chính xác cao hơn khi người dùng truy vấn.',
warning: 'Vui lòng thiết lập khóa API nhà cung cấp mô hình trước.',
click: 'Đi đến cài đặt',
economical: 'Tiết kiệm',
- economicalTip: 'Sử dụng các động cơ vector ngoại tuyến, chỉ số từ khóa, v.v. để giảm chính xác mà không tốn token',
+ economicalTip: 'Sử dụng các động cơ vector ngoại tuyến, chỉ mục từ khóa, v.v. để giảm độ chính xác mà không tốn token',
QATitle: 'Phân đoạn theo định dạng Câu hỏi & Trả lời',
QATip: 'Bật tùy chọn này sẽ tiêu tốn thêm token',
QALanguage: 'Phân đoạn bằng',
- emstimateCost: 'Ước lượng',
- emstimateSegment: 'Đoạn ước tính',
+ emstimateCost: 'Ước tính',
+ emstimateSegment: 'Số đoạn ước tính',
segmentCount: 'đoạn',
calculating: 'Đang tính toán...',
fileSource: 'Tiền xử lý tài liệu',
notionSource: 'Tiền xử lý trang',
- other: 'và những ',
+ other: 'và ',
fileUnit: ' tệp',
notionUnit: ' trang',
- previousStep: 'Bước trước',
+ previousStep: 'Quay lại',
nextStep: 'Lưu & Xử lý',
save: 'Lưu & Xử lý',
- cancel: 'Hủy bỏ',
+ cancel: 'Hủy',
sideTipTitle: 'Tại sao phải phân đoạn và tiền xử lý?',
sideTipP1: 'Khi xử lý dữ liệu văn bản, phân đoạn và làm sạch là hai bước tiền xử lý quan trọng.',
- sideTipP2: 'Phân đoạn chia nhỏ văn bản dài thành đoạn để mô hình hiểu được tốt hơn. Điều này cải thiện chất lượng và tính liên quan của kết quả mô hình.',
+ sideTipP2: 'Phân đoạn chia nhỏ văn bản dài thành các đoạn để mô hình hiểu được tốt hơn. Điều này cải thiện chất lượng và tính liên quan của kết quả mô hình.',
sideTipP3: 'Làm sạch loại bỏ các ký tự và định dạng không cần thiết, làm cho Kiến thức trở nên sạch sẽ và dễ dàng phân tích hơn.',
sideTipP4: 'Phân đoạn và làm sạch đúng cách cải thiện hiệu suất của mô hình, cung cấp kết quả chính xác và có giá trị hơn.',
previewTitle: 'Xem trước',
@@ -104,8 +104,8 @@ const translation = {
previewSwitchTipStart: 'Xem trước đoạn hiện tại đang ở định dạng văn bản, chuyển sang xem trước dạng câu hỏi và trả lời sẽ',
previewSwitchTipEnd: ' tiêu tốn thêm token',
characters: 'ký tự',
- indexSettedTip: 'Để thay đổi phương pháp chỉ số, vui lòng đi tới ',
- retrivalSettedTip: 'Để thay đổi phương pháp chỉ số, vui lòng đi tới ',
+ indexSettedTip: 'Để thay đổi phương pháp chỉ mục, vui lòng đi tới ',
+ retrivalSettedTip: 'Để thay đổi phương pháp truy xuất, vui lòng đi tới ',
datasetSettingLink: 'cài đặt Kiến thức.',
},
stepThree: {
@@ -119,11 +119,11 @@ const translation = {
resume: 'Tiếp tục xử lý',
navTo: 'Đi đến tài liệu',
sideTipTitle: 'Tiếp theo là gì',
- sideTipContent: 'Sau khi tài liệu hoàn thành chỉ mục, Kiến thức có thể được tích hợp vào ứng dụng như một ngữ cảnh, bạn có thể tìm cài đặt ngữ cảnh trong trang chỉ đạo đoạn. Bạn cũng có thể tạo nó như một plugin chỉ mục ChatGPT độc lập để phát hành.',
+ sideTipContent: 'Sau khi tài liệu hoàn thành chỉ mục, Kiến thức có thể được tích hợp vào ứng dụng như một ngữ cảnh, bạn có thể tìm cài đặt ngữ cảnh trong trang điều chỉnh prompt. Bạn cũng có thể tạo nó như một plugin chỉ mục ChatGPT độc lập để phát hành.',
modelTitle: 'Bạn có chắc chắn muốn dừng việc nhúng?',
modelContent: 'Nếu bạn cần tiếp tục xử lý sau này, bạn sẽ tiếp tục từ vị trí bạn đã dừng lại.',
modelButtonConfirm: 'Xác nhận',
- modelButtonCancel: 'Hủy bỏ',
+ modelButtonCancel: 'Hủy',
},
}
diff --git a/web/i18n/vi-VN/dataset-documents.ts b/web/i18n/vi-VN/dataset-documents.ts
index 3cf486a3c90519..5df6e40718b4a9 100644
--- a/web/i18n/vi-VN/dataset-documents.ts
+++ b/web/i18n/vi-VN/dataset-documents.ts
@@ -1,17 +1,17 @@
const translation = {
list: {
title: 'Tài liệu',
- desc: 'Tất cả các tệp của Kiến thức được hiển thị ở đây, và toàn bộ Kiến thức có thể được liên kết với trích dẫn của Dify hoặc được lập chỉ mục thông qua plugin Chat.',
+ desc: 'Tất cả các tệp của Kiến thức được hiển thị ở đây. Toàn bộ Kiến thức có thể được liên kết với trích dẫn của Dify hoặc được lập chỉ mục thông qua plugin Chat.',
addFile: 'Thêm tệp',
- addPages: 'Thêm Trang',
+ addPages: 'Thêm trang',
table: {
header: {
fileName: 'TÊN TỆP',
- words: 'TỪ',
+ words: 'SỐ TỪ',
hitCount: 'SỐ LẦN TRUY VẤN',
uploadTime: 'THỜI GIAN TẢI LÊN',
status: 'TRẠNG THÁI',
- action: 'HÀNH ĐỘNG',
+ action: 'THAO TÁC',
},
},
action: {
@@ -23,7 +23,7 @@ const translation = {
archive: 'Lưu trữ',
unarchive: 'Khôi phục',
delete: 'Xóa',
- enableWarning: 'Tệp được lưu trữ không thể được kích hoạt',
+ enableWarning: 'Tệp đã lưu trữ không thể được kích hoạt',
sync: 'Đồng bộ',
},
index: {
@@ -66,41 +66,40 @@ const translation = {
contentTitle: 'nội dung đoạn',
content: 'nội dung',
template: 'Tải mẫu ở đây',
- cancel: 'Hủy bỏ',
- run: 'Chạy Hàng loạt',
+ cancel: 'Hủy',
+ run: 'Chạy hàng loạt',
runError: 'Chạy hàng loạt thất bại',
processing: 'Đang xử lý hàng loạt',
- completed: 'Nhập đã hoàn thành',
+ completed: 'Nhập hoàn tất',
error: 'Lỗi nhập',
ok: 'OK',
},
},
metadata: {
title: 'Siêu dữ liệu',
- desc: 'Gắn nhãn siêu dữ liệu cho các tài liệu cho phép trí tuệ nhân tạo truy cập chúng một cách kịp thời và tiết lộ nguồn của các tài liệu tham chiếu cho người dùng.',
+ desc: 'Gắn nhãn siêu dữ liệu cho các tài liệu cho phép AI truy cập chúng kịp thời và tiết lộ nguồn của các tài liệu tham chiếu cho người dùng.',
dateTimeFormat: 'D MMMM, YYYY hh:mm A',
docTypeSelectTitle: 'Vui lòng chọn loại tài liệu',
docTypeChangeTitle: 'Thay đổi loại tài liệu',
- docTypeSelectWarning:
- 'Nếu thay đổi loại tài liệu, các siêu dữ liệu hiện tại sẽ không được bảo toàn nữa',
+ docTypeSelectWarning: 'Nếu thay đổi loại tài liệu, các siêu dữ liệu hiện tại sẽ không được bảo toàn',
firstMetaAction: 'Bắt đầu',
placeholder: {
add: 'Thêm ',
select: 'Chọn ',
},
source: {
- upload_file: 'Tải lên Tệp',
+ upload_file: 'Tải lên tệp',
notion: 'Đồng bộ từ Notion',
github: 'Đồng bộ từ Github',
},
type: {
book: 'Sách',
- webPage: 'Trang Web',
+ webPage: 'Trang web',
paper: 'Bài báo',
- socialMediaPost: 'Bài viết trên Mạng xã hội',
+ socialMediaPost: 'Bài đăng mạng xã hội',
personalDocument: 'Tài liệu cá nhân',
businessDocument: 'Tài liệu doanh nghiệp',
- IMChat: 'Trò chuyện qua tin nhắn',
+ IMChat: 'Trò chuyện tin nhắn',
wikipediaEntry: 'Bài viết Wikipedia',
notion: 'Đồng bộ từ Notion',
github: 'Đồng bộ từ Github',
@@ -108,10 +107,10 @@ const translation = {
},
field: {
processRule: {
- processDoc: 'Xử lý Tài liệu',
+ processDoc: 'Xử lý tài liệu',
segmentRule: 'Quy tắc phân đoạn',
- segmentLength: 'Chiều dài các đoạn',
- processClean: 'Quy tắc làm sạch Văn bản',
+ segmentLength: 'Độ dài đoạn',
+ processClean: 'Quy tắc làm sạch văn bản',
},
book: {
title: 'Tiêu đề',
@@ -120,7 +119,7 @@ const translation = {
publisher: 'Nhà xuất bản',
publicationDate: 'Ngày xuất bản',
ISBN: 'ISBN',
- category: 'Danh mục',
+ category: 'Thể loại',
},
webPage: {
title: 'Tiêu đề',
@@ -137,7 +136,7 @@ const translation = {
author: 'Tác giả',
publishDate: 'Ngày xuất bản',
journalConferenceName: 'Tên tạp chí/Hội nghị',
- volumeIssuePage: 'Số/Trang',
+ volumeIssuePage: 'Tập/Số/Trang',
DOI: 'DOI',
topicsKeywords: 'Chủ đề/Từ khóa',
abstract: 'Tóm tắt',
@@ -146,14 +145,14 @@ const translation = {
platform: 'Nền tảng',
authorUsername: 'Tác giả/Tên người dùng',
publishDate: 'Ngày đăng',
- postURL: 'URL Bài viết',
+ postURL: 'URL bài đăng',
topicsTags: 'Chủ đề/Thẻ',
},
personalDocument: {
title: 'Tiêu đề',
author: 'Tác giả',
creationDate: 'Ngày tạo',
- lastModifiedDate: 'Ngày sửa đổi cuối cùng',
+ lastModifiedDate: 'Ngày sửa đổi cuối',
documentType: 'Loại tài liệu',
tagsCategory: 'Thẻ/Danh mục',
},
@@ -161,14 +160,14 @@ const translation = {
title: 'Tiêu đề',
author: 'Tác giả',
creationDate: 'Ngày tạo',
- lastModifiedDate: 'Ngày sửa đổi cuối cùng',
+ lastModifiedDate: 'Ngày sửa đổi cuối',
documentType: 'Loại tài liệu',
departmentTeam: 'Phòng ban/Nhóm',
},
IMChat: {
- chatPlatform: 'Nền tảng Trò chuyện',
- chatPartiesGroupName: 'Đối tác Trò chuyện/Tên nhóm',
- participants: 'Tham gia viên',
+ chatPlatform: 'Nền tảng trò chuyện',
+ chatPartiesGroupName: 'Người tham gia/Tên nhóm',
+ participants: 'Người tham gia',
startDate: 'Ngày bắt đầu',
endDate: 'Ngày kết thúc',
topicsKeywords: 'Chủ đề/Từ khóa',
@@ -178,8 +177,8 @@ const translation = {
title: 'Tiêu đề',
language: 'Ngôn ngữ',
webpageURL: 'URL trang web',
- editorContributor: 'Biên tập viên/Đóng góp viên',
- lastEditDate: 'Ngày chỉnh sửa cuối cùng',
+ editorContributor: 'Biên tập viên/Người đóng góp',
+ lastEditDate: 'Ngày chỉnh sửa cuối',
summaryIntroduction: 'Tóm tắt/Giới thiệu',
},
notion: {
@@ -187,7 +186,7 @@ const translation = {
language: 'Ngôn ngữ',
author: 'Tác giả',
createdTime: 'Thời gian tạo',
- lastModifiedTime: 'Thời gian chỉnh sửa cuối cùng',
+ lastModifiedTime: 'Thời gian sửa đổi cuối',
url: 'URL',
tag: 'Thẻ',
description: 'Mô tả',
@@ -201,21 +200,21 @@ const translation = {
programmingLang: 'Ngôn ngữ lập trình',
url: 'URL',
license: 'Giấy phép',
- lastCommitTime: 'Thời gian commit cuối cùng',
- lastCommitAuthor: 'Tác giả commit cuối cùng',
+ lastCommitTime: 'Thời gian commit cuối',
+ lastCommitAuthor: 'Tác giả commit cuối',
},
originInfo: {
originalFilename: 'Tên tệp gốc',
originalFileSize: 'Kích thước tệp gốc',
uploadDate: 'Ngày tải lên',
- lastUpdateDate: 'Ngày cập nhật cuối cùng',
+ lastUpdateDate: 'Ngày cập nhật cuối',
source: 'Nguồn',
},
technicalParameters: {
- segmentSpecification: 'Đặc tả các đoạn',
- segmentLength: 'Chiều dài các đoạn',
- avgParagraphLength: 'Độ dài trung bình của đoạn',
- paragraphs: 'Các đoạn',
+ segmentSpecification: 'Đặc tả đoạn',
+ segmentLength: 'Độ dài đoạn',
+ avgParagraphLength: 'Độ dài trung bình đoạn văn',
+ paragraphs: 'Số đoạn văn',
hitCount: 'Số lần truy vấn',
embeddingTime: 'Thời gian nhúng',
embeddedSpend: 'Chi phí nhúng',
@@ -250,7 +249,7 @@ const translation = {
},
categoryMap: {
book: {
- fiction: 'Hư cấu',
+ fiction: 'Tiểu thuyết',
biography: 'Tiểu sử',
history: 'Lịch sử',
science: 'Khoa học',
@@ -262,18 +261,18 @@ const translation = {
art: 'Nghệ thuật',
travel: 'Du lịch',
health: 'Sức khỏe',
- selfHelp: 'Tự giúp bản thân',
+ selfHelp: 'Tự lực',
businessEconomics: 'Kinh doanh và kinh tế',
cooking: 'Nấu ăn',
- childrenYoungAdults: 'Trẻ em và thanh thiếu niên',
+ childrenYoungAdults: 'Thiếu nhi và thanh thiếu niên',
comicsGraphicNovels: 'Truyện tranh và tiểu thuyết đồ họa',
- poetry: 'Thơ',
+ poetry: 'Thơ ca',
drama: 'Kịch',
other: 'Khác',
},
personalDoc: {
notes: 'Ghi chú',
- blogDraft: 'Nháp Blog',
+ blogDraft: 'Bản nháp blog',
diary: 'Nhật ký',
researchReport: 'Báo cáo nghiên cứu',
bookExcerpt: 'Trích đoạn sách',
@@ -283,7 +282,7 @@ const translation = {
photoCollection: 'Bộ sưu tập ảnh',
creativeWriting: 'Viết sáng tạo',
codeSnippet: 'Đoạn mã',
- designDraft: 'Bản dựng thiết kế',
+ designDraft: 'Bản phác thảo thiết kế',
personalResume: 'Sơ yếu lý lịch cá nhân',
other: 'Khác',
},
@@ -295,31 +294,31 @@ const translation = {
trainingMaterials: 'Tài liệu đào tạo',
requirementsDocument: 'Tài liệu yêu cầu',
designDocument: 'Tài liệu thiết kế',
- productSpecification: 'Thông số sản phẩm',
+ productSpecification: 'Thông số kỹ thuật sản phẩm',
financialReport: 'Báo cáo tài chính',
marketAnalysis: 'Phân tích thị trường',
projectPlan: 'Kế hoạch dự án',
teamStructure: 'Cấu trúc nhóm',
policiesProcedures: 'Chính sách và quy trình',
- contractsAgreements: 'Hợp đồng và thoả thuận',
- emailCorrespondence: 'Thư tín',
+ contractsAgreements: 'Hợp đồng và thỏa thuận',
+ emailCorrespondence: 'Thư từ trao đổi',
other: 'Khác',
},
},
},
embedding: {
processing: 'Đang nhúng...',
- paused: 'Đã tạm dừng việc nhúng',
- completed: 'Hoàn tất việc nhúng',
+ paused: 'Đã tạm dừng nhúng',
+ completed: 'Hoàn tất nhúng',
error: 'Lỗi khi nhúng',
docName: 'Đang xử lý văn bản',
mode: 'Quy tắc phân đoạn',
- segmentLength: 'Chiều dài các đoạn',
- textCleaning: 'Định nghĩa và làm sạch Văn bản',
+ segmentLength: 'Độ dài đoạn',
+ textCleaning: 'Định nghĩa và làm sạch văn bản',
segments: 'Đoạn',
highQuality: 'Chế độ chất lượng cao',
economy: 'Chế độ tiết kiệm',
- estimate: 'Ước lượng tiêu thụ',
+ estimate: 'Ước tính tiêu thụ',
stop: 'Dừng xử lý',
resume: 'Tiếp tục xử lý',
automatic: 'Tự động',
@@ -327,21 +326,21 @@ const translation = {
previewTip: 'Xem trước đoạn sẽ có sẵn sau khi việc nhúng hoàn tất',
},
segment: {
- paragraphs: 'Đoạn',
+ paragraphs: 'Đoạn văn',
keywords: 'Từ khóa',
addKeyWord: 'Thêm từ khóa',
keywordError: 'Độ dài tối đa của từ khóa là 20',
characters: 'ký tự',
hitCount: 'Số lần truy vấn',
- vectorHash: 'Băm vector: ',
+ vectorHash: 'Mã băm vector: ',
questionPlaceholder: 'thêm câu hỏi ở đây',
questionEmpty: 'Câu hỏi không thể trống',
answerPlaceholder: 'thêm câu trả lời ở đây',
answerEmpty: 'Câu trả lời không thể trống',
contentPlaceholder: 'thêm nội dung ở đây',
contentEmpty: 'Nội dung không thể trống',
- newTextSegment: 'Đoạn văn mới',
- newQaSegment: 'Đoạn câu hỏi & trả lời mới',
+ newTextSegment: 'Đoạn văn bản mới',
+ newQaSegment: 'Đoạn hỏi đáp mới',
delete: 'Xóa đoạn này?',
},
}
diff --git a/web/i18n/vi-VN/dataset-hit-testing.ts b/web/i18n/vi-VN/dataset-hit-testing.ts
index 044e34ab5f8925..a512888250610e 100644
--- a/web/i18n/vi-VN/dataset-hit-testing.ts
+++ b/web/i18n/vi-VN/dataset-hit-testing.ts
@@ -1,6 +1,6 @@
const translation = {
- title: 'Kiểm Tra Truy Vấn',
- desc: 'Kiểm tra hiệu ứng đánh trúng của Kiến thức dựa trên văn bản truy vấn đã cho.',
+ title: 'Kiểm tra truy vấn',
+ desc: 'Kiểm tra hiệu quả truy xuất của Kiến thức dựa trên văn bản truy vấn đã cho.',
dateTimeFormat: 'MM/DD/YYYY hh:mm A',
recents: 'Gần đây',
table: {
@@ -12,17 +12,17 @@ const translation = {
},
input: {
title: 'Văn bản nguồn',
- placeholder: 'Vui lòng nhập một văn bản, một câu khẳng định ngắn được khuyến nghị.',
+ placeholder: 'Vui lòng nhập một văn bản, khuyến nghị sử dụng một câu khẳng định ngắn.',
countWarning: 'Tối đa 200 ký tự.',
- indexWarning: 'Chỉ có trong Kiến thức chất lượng cao.',
- testing: 'Kiểm tra',
+ indexWarning: 'Chỉ có sẵn trong Kiến thức chất lượng cao.',
+ testing: 'Đang kiểm tra',
},
hit: {
- title: 'RETRIEVAL PARAGRAPHS',
- emptyTip: 'Kết quả Kiểm Tra Truy Vấn sẽ hiển thị ở đây',
+ title: 'CÁC ĐOẠN VĂN ĐƯỢC TRUY XUẤT',
+ emptyTip: 'Kết quả kiểm tra truy vấn sẽ hiển thị ở đây',
},
- noRecentTip: 'Không có kết quả truy vấn gần đây ở đây',
- viewChart: 'Xem VECTOR CHART',
+ noRecentTip: 'Không có kết quả truy vấn gần đây',
+ viewChart: 'Xem BIỂU ĐỒ VECTOR',
}
export default translation
diff --git a/web/i18n/vi-VN/dataset-settings.ts b/web/i18n/vi-VN/dataset-settings.ts
index beb649c30e5b75..e6feb782780077 100644
--- a/web/i18n/vi-VN/dataset-settings.ts
+++ b/web/i18n/vi-VN/dataset-settings.ts
@@ -1,30 +1,30 @@
const translation = {
title: 'Cài đặt Kiến thức',
- desc: 'Ở đây, bạn có thể sửa đổi các thuộc tính và phương pháp làm việc của Kiến thức.',
+ desc: 'Tại đây, bạn có thể sửa đổi các thuộc tính và phương pháp làm việc của Kiến thức.',
form: {
name: 'Tên Kiến thức',
namePlaceholder: 'Vui lòng nhập tên Kiến thức',
- nameError: 'Tên không thể trống',
+ nameError: 'Tên không thể để trống',
desc: 'Mô tả Kiến thức',
- descInfo: 'Vui lòng viết mô tả văn bản rõ ràng để chỉ rõ nội dung của Kiến thức. Mô tả này sẽ được sử dụng làm cơ sở cho việc kết hợp khi lựa chọn từ nhiều Kiến thức cho sự suy luận.',
- descPlaceholder: 'Miêu tả những gì có trong Kiến thức này. Một mô tả chi tiết cho phép AI truy cập nội dung của Kiến thức một cách kịp thời. Nếu trống, Dify sẽ sử dụng chiến lược hit mặc định.',
+ descInfo: 'Vui lòng viết mô tả rõ ràng về nội dung của Kiến thức. Mô tả này sẽ được sử dụng làm cơ sở cho việc kết hợp khi lựa chọn từ nhiều Kiến thức trong quá trình suy luận.',
+ descPlaceholder: 'Mô tả những gì có trong Kiến thức này. Một mô tả chi tiết giúp AI truy cập nội dung của Kiến thức một cách hiệu quả. Nếu để trống, Dify sẽ sử dụng chiến lược truy xuất mặc định.',
descWrite: 'Tìm hiểu cách viết mô tả Kiến thức tốt.',
permissions: 'Quyền hạn',
permissionsOnlyMe: 'Chỉ mình tôi',
permissionsAllMember: 'Tất cả thành viên nhóm',
indexMethod: 'Phương pháp chỉ mục',
indexMethodHighQuality: 'Chất lượng cao',
- indexMethodHighQualityTip: 'Gọi mô hình Embedding để xử lý nhằm cung cấp độ chính xác cao hơn khi người dùng truy vấn.',
+ indexMethodHighQualityTip: 'Sử dụng mô hình Embedding để xử lý, cung cấp độ chính xác cao hơn khi người dùng truy vấn.',
indexMethodEconomy: 'Tiết kiệm',
- indexMethodEconomyTip: 'Sử dụng các công cụ nhúng vector ngoại tuyến, chỉ mục từ khóa, v.v. để giảm độ chính xác mà không cần chi tiêu token',
+ indexMethodEconomyTip: 'Sử dụng các công cụ nhúng vector ngoại tuyến, chỉ mục từ khóa, v.v. để giảm độ chính xác mà không tiêu tốn token',
embeddingModel: 'Mô hình nhúng',
embeddingModelTip: 'Để thay đổi mô hình nhúng, vui lòng đi tới ',
embeddingModelTipLink: 'Cài đặt',
retrievalSetting: {
- title: 'Cài đặt truy vấn',
+ title: 'Cài đặt truy xuất',
learnMore: 'Tìm hiểu thêm',
- description: ' về phương pháp truy vấn.',
- longDescription: ' về phương pháp truy vấn, bạn có thể thay đổi điều này bất kỳ lúc nào trong cài đặt Kiến thức.',
+ description: ' về phương pháp truy xuất.',
+ longDescription: ' về phương pháp truy xuất. Bạn có thể thay đổi điều này bất kỳ lúc nào trong cài đặt Kiến thức.',
},
save: 'Lưu',
},
diff --git a/web/i18n/vi-VN/dataset.ts b/web/i18n/vi-VN/dataset.ts
index 6e32079bb208a9..27dad01f51c58b 100644
--- a/web/i18n/vi-VN/dataset.ts
+++ b/web/i18n/vi-VN/dataset.ts
@@ -1,44 +1,44 @@
const translation = {
knowledge: 'Kiến thức',
documentCount: ' tài liệu',
- wordCount: ' k từ',
+ wordCount: ' nghìn từ',
appCount: ' ứng dụng liên kết',
- createDataset: 'Tạo Kiến thức',
- createDatasetIntro: 'Nhập dữ liệu văn bản của bạn hoặc viết dữ liệu theo thời gian thực qua Webhook để tăng cường ngữ cảnh LLM.',
- deleteDatasetConfirmTitle: 'Xóa Kiến thức này?',
+ createDataset: 'Tạo bộ kiến thức',
+ createDatasetIntro: 'Nhập dữ liệu văn bản của bạn hoặc cập nhật dữ liệu theo thời gian thực qua Webhook để tăng cường ngữ cảnh cho LLM.',
+ deleteDatasetConfirmTitle: 'Xóa bộ kiến thức này?',
deleteDatasetConfirmContent:
- 'Xóa Kiến thức là không thể đảo ngược. Người dùng sẽ không còn có khả năng truy cập Kiến thức của bạn nữa, và tất cả các cấu hình và nhật ký nhắc nhở sẽ bị xóa vĩnh viễn.',
- datasetUsedByApp: 'Kiến thức này đang được sử dụng bởi một số ứng dụng. Các ứng dụng sẽ không thể sử dụng Kiến thức này nữa, và tất cả cấu hình lời nhắc và nhật ký sẽ bị xóa vĩnh viễn.',
- datasetDeleted: 'Kiến thức đã bị xóa',
- datasetDeleteFailed: 'Xóa Kiến thức không thành công',
- didYouKnow: 'Bạn đã biết chưa?',
- intro1: 'Kiến thức có thể được tích hợp vào ứng dụng Dify ',
+ 'Việc xóa bộ kiến thức là không thể hoàn tác. Người dùng sẽ không còn truy cập được vào bộ kiến thức của bạn, và tất cả cấu hình cùng nhật ký lời nhắc sẽ bị xóa vĩnh viễn.',
+ datasetUsedByApp: 'Bộ kiến thức này đang được sử dụng bởi một số ứng dụng. Các ứng dụng sẽ không thể sử dụng bộ kiến thức này nữa, và tất cả cấu hình lời nhắc cùng nhật ký sẽ bị xóa vĩnh viễn.',
+ datasetDeleted: 'Bộ kiến thức đã được xóa',
+ datasetDeleteFailed: 'Xóa bộ kiến thức không thành công',
+ didYouKnow: 'Bạn có biết?',
+ intro1: 'Bộ kiến thức có thể được tích hợp vào ứng dụng Dify ',
intro2: 'như một ngữ cảnh',
intro3: ',',
- intro4: 'hoặc nó ',
+ intro4: 'hoặc ',
intro5: 'có thể được tạo',
- intro6: ' dưới dạng một phần cắm chỉ mục ChatGPT độc lập để xuất bản',
+ intro6: ' dưới dạng một plugin chỉ mục ChatGPT độc lập để xuất bản',
unavailable: 'Không khả dụng',
- unavailableTip: 'Mô hình nhúng không khả dụng, mô hình nhúng mặc định cần được cấu hình',
- datasets: 'KIẾN THỨC',
+ unavailableTip: 'Mô hình nhúng không khả dụng, cần cấu hình mô hình nhúng mặc định',
+ datasets: 'BỘ KIẾN THỨC',
datasetsApi: 'API',
retrieval: {
semantic_search: {
title: 'Tìm kiếm Vector',
- description: 'Tạo các nhúng truy vấn và tìm kiếm phần văn bản giống nhất với biểu diễn vector của nó.',
+ description: 'Tạo các nhúng truy vấn và tìm kiếm đoạn văn bản tương tự nhất với biểu diễn vector của nó.',
},
full_text_search: {
title: 'Tìm kiếm Toàn văn bản',
- description: 'Chỉ mục tất cả các thuật ngữ trong tài liệu, cho phép người dùng tìm kiếm bất kỳ thuật ngữ nào và truy xuất phần văn bản liên quan chứa các thuật ngữ đó.',
+ description: 'Lập chỉ mục cho tất cả các thuật ngữ trong tài liệu, cho phép người dùng tìm kiếm bất kỳ thuật ngữ nào và truy xuất đoạn văn bản liên quan chứa các thuật ngữ đó.',
},
hybrid_search: {
- title: 'Tìm kiếm Hybrid',
- description: 'Thực hiện tìm kiếm toàn văn bản và tìm kiếm vector đồng thời, sắp xếp lại để chọn lựa phù hợp nhất với truy vấn của người dùng. Cấu hình của API mô hình Rerank là cần thiết.',
- recommend: 'Gợi ý',
+ title: 'Tìm kiếm Kết hợp',
+ description: 'Thực hiện tìm kiếm toàn văn bản và tìm kiếm vector đồng thời, sắp xếp lại để chọn kết quả phù hợp nhất với truy vấn của người dùng. Yêu cầu cấu hình API mô hình Rerank.',
+ recommend: 'Đề xuất',
},
invertedIndex: {
- title: 'Chỉ mục Nghịch đảo',
- description: 'Chỉ mục Nghịch đảo là một cấu trúc được sử dụng cho việc truy xuất hiệu quả. Tổ chức theo thuật ngữ, mỗi thuật ngữ trỏ đến tài liệu hoặc trang web chứa nó.',
+ title: 'Chỉ mục Ngược',
+ description: 'Chỉ mục Ngược là một cấu trúc được sử dụng cho việc truy xuất hiệu quả. Nó được tổ chức theo thuật ngữ, mỗi thuật ngữ trỏ đến tài liệu hoặc trang web chứa nó.',
},
change: 'Thay đổi',
changeRetrievalMethod: 'Thay đổi phương pháp truy xuất',
diff --git a/web/i18n/vi-VN/explore.ts b/web/i18n/vi-VN/explore.ts
index b5eb5be50a6592..9c5d21052e1b7c 100644
--- a/web/i18n/vi-VN/explore.ts
+++ b/web/i18n/vi-VN/explore.ts
@@ -2,8 +2,8 @@ const translation = {
title: 'Khám phá',
sidebar: {
discovery: 'Khám phá',
- chat: 'Chat',
- workspace: 'Kho lưu trữ',
+ chat: 'Trò chuyện',
+ workspace: 'Không gian làm việc',
action: {
pin: 'Ghim',
unpin: 'Bỏ ghim',
@@ -16,23 +16,23 @@ const translation = {
},
},
apps: {
- title: 'Khám phá Ứng dụng bởi Dify',
- description: 'Sử dụng ngay các ứng dụng mẫu này hoặc tùy chỉnh các ứng dụng của bạn dựa trên các mẫu.',
+ title: 'Khám phá ứng dụng bởi Dify',
+ description: 'Sử dụng ngay các ứng dụng mẫu này hoặc tùy chỉnh ứng dụng của bạn dựa trên các mẫu có sẵn.',
allCategories: 'Tất cả danh mục',
},
appCard: {
- addToWorkspace: 'Thêm vào Kho lưu trữ',
+ addToWorkspace: 'Thêm vào không gian làm việc',
customize: 'Tùy chỉnh',
},
appCustomize: {
title: 'Tạo ứng dụng từ {{name}}',
subTitle: 'Biểu tượng và tên ứng dụng',
- nameRequired: 'Tên ứng dụng là bắt buộc',
+ nameRequired: 'Vui lòng nhập tên ứng dụng',
},
category: {
Assistant: 'Trợ lý',
- Writing: 'Viết',
- Translate: 'Dịch',
+ Writing: 'Viết lách',
+ Translate: 'Dịch thuật',
Programming: 'Lập trình',
HR: 'Nhân sự',
},
diff --git a/web/i18n/vi-VN/login.ts b/web/i18n/vi-VN/login.ts
index 1fd3e55dfe1b32..8d291c7f33f1bd 100644
--- a/web/i18n/vi-VN/login.ts
+++ b/web/i18n/vi-VN/login.ts
@@ -1,70 +1,70 @@
const translation = {
- pageTitle: 'Xin chào, hãy bắt đầu!👋',
+ pageTitle: 'Xin chào, hãy bắt đầu! 👋',
welcome: 'Chào mừng bạn đến với Dify, vui lòng đăng nhập để tiếp tục.',
- email: 'Địa chỉ Email',
- emailPlaceholder: 'Email của bạn',
+ email: 'Địa chỉ email',
+ emailPlaceholder: 'Nhập email của bạn',
password: 'Mật khẩu',
- passwordPlaceholder: 'Mật khẩu của bạn',
+ passwordPlaceholder: 'Nhập mật khẩu của bạn',
name: 'Tên người dùng',
- namePlaceholder: 'Tên người dùng của bạn',
+ namePlaceholder: 'Nhập tên người dùng của bạn',
forget: 'Quên mật khẩu?',
signBtn: 'Đăng nhập',
installBtn: 'Cài đặt',
setAdminAccount: 'Thiết lập tài khoản quản trị',
- setAdminAccountDesc: 'Quyền tối đa cho tài khoản quản trị, có thể được sử dụng để tạo ứng dụng và quản lý các nhà cung cấp LLM, v.v.',
+ setAdminAccountDesc: 'Tài khoản quản trị có quyền tối đa, có thể tạo ứng dụng và quản lý các nhà cung cấp LLM, v.v.',
createAndSignIn: 'Tạo và đăng nhập',
- oneMoreStep: 'Một bước nữa',
+ oneMoreStep: 'Còn một bước nữa',
createSample: 'Dựa trên thông tin này, chúng tôi sẽ tạo một ứng dụng mẫu cho bạn',
invitationCode: 'Mã mời',
- invitationCodePlaceholder: 'Mã mời của bạn',
+ invitationCodePlaceholder: 'Nhập mã mời của bạn',
interfaceLanguage: 'Ngôn ngữ giao diện',
timezone: 'Múi giờ',
go: 'Đi đến Dify',
- sendUsMail: 'Gửi email giới thiệu của bạn cho chúng tôi, và chúng tôi sẽ xử lý yêu cầu mời.',
- acceptPP: 'Tôi đã đọc và chấp nhận chính sách bảo mật',
+ sendUsMail: 'Gửi email giới thiệu cho chúng tôi, chúng tôi sẽ xử lý yêu cầu mời của bạn.',
+ acceptPP: 'Tôi đã đọc và đồng ý với chính sách bảo mật',
reset: 'Vui lòng chạy lệnh sau để đặt lại mật khẩu của bạn',
withGitHub: 'Tiếp tục với GitHub',
withGoogle: 'Tiếp tục với Google',
- rightTitle: 'Mở khóa tiềm năng đầy đủ của LLM',
- rightDesc: 'Dễ dàng xây dựng ứng dụng AI hấp dẫn mắt, có thể vận hành và cải thiện được.',
+ rightTitle: 'Khai phá tiềm năng tối đa của LLM',
+ rightDesc: 'Dễ dàng xây dựng ứng dụng AI hấp dẫn, có thể vận hành và cải thiện được.',
tos: 'Điều khoản dịch vụ',
pp: 'Chính sách bảo mật',
tosDesc: 'Bằng cách đăng ký, bạn đồng ý với',
- goToInit: 'Nếu bạn chưa khởi tạo tài khoản, vui lòng đi đến trang khởi tạo',
- donthave: 'Chưa có?',
+ goToInit: 'Nếu bạn chưa khởi tạo tài khoản, vui lòng chuyển đến trang khởi tạo',
+ donthave: 'Chưa có tài khoản?',
invalidInvitationCode: 'Mã mời không hợp lệ',
accountAlreadyInited: 'Tài khoản đã được khởi tạo',
forgotPassword: 'Quên mật khẩu?',
resetLinkSent: 'Đã gửi liên kết đặt lại mật khẩu',
sendResetLink: 'Gửi liên kết đặt lại mật khẩu',
backToSignIn: 'Quay lại đăng nhập',
- forgotPasswordDesc: 'Vui lòng nhập địa chỉ email của bạn để đặt lại mật khẩu. Chúng tôi sẽ gửi cho bạn một email với hướng dẫn về cách đặt lại mật khẩu.',
- checkEmailForResetLink: 'Vui lòng kiểm tra email của bạn để nhận liên kết đặt lại mật khẩu. Nếu không thấy trong vài phút, hãy kiểm tra thư mục spam.',
+ forgotPasswordDesc: 'Vui lòng nhập địa chỉ email của bạn để đặt lại mật khẩu. Chúng tôi sẽ gửi cho bạn một email hướng dẫn cách đặt lại mật khẩu.',
+ checkEmailForResetLink: 'Vui lòng kiểm tra email để nhận liên kết đặt lại mật khẩu. Nếu không thấy trong vài phút, hãy kiểm tra thư mục spam.',
passwordChanged: 'Đăng nhập ngay',
changePassword: 'Đổi mật khẩu',
changePasswordTip: 'Vui lòng nhập mật khẩu mới cho tài khoản của bạn',
invalidToken: 'Mã thông báo không hợp lệ hoặc đã hết hạn',
confirmPassword: 'Xác nhận mật khẩu',
- confirmPasswordPlaceholder: 'Xác nhận mật khẩu mới của bạn',
+ confirmPasswordPlaceholder: 'Nhập lại mật khẩu mới của bạn',
passwordChangedTip: 'Mật khẩu của bạn đã được thay đổi thành công',
error: {
- emailEmpty: 'Địa chỉ Email là bắt buộc',
+ emailEmpty: 'Vui lòng nhập địa chỉ email',
emailInValid: 'Vui lòng nhập một địa chỉ email hợp lệ',
- nameEmpty: 'Tên là bắt buộc',
- passwordEmpty: 'Mật khẩu là bắt buộc',
- passwordInvalid: 'Mật khẩu phải chứa chữ và số, và độ dài phải lớn hơn 8',
+ nameEmpty: 'Vui lòng nhập tên',
+ passwordEmpty: 'Vui lòng nhập mật khẩu',
+ passwordInvalid: 'Mật khẩu phải chứa cả chữ và số, và có độ dài ít nhất 8 ký tự',
},
license: {
- tip: 'Trước khi bắt đầu Phiên bản Cộng đồng của Dify, hãy đọc',
+ tip: 'Trước khi bắt đầu sử dụng Phiên bản Cộng đồng của Dify, vui lòng đọc',
link: 'Giấy phép mã nguồn mở trên GitHub',
},
join: 'Tham gia',
joinTipStart: 'Mời bạn tham gia',
- joinTipEnd: 'đội tại Dify',
+ joinTipEnd: 'đội ngũ tại Dify',
invalid: 'Liên kết đã hết hạn',
explore: 'Khám phá Dify',
activatedTipStart: 'Bạn đã tham gia',
- activatedTipEnd: 'đội',
+ activatedTipEnd: 'đội ngũ',
activated: 'Đăng nhập ngay',
adminInitPassword: 'Mật khẩu khởi tạo quản trị viên',
validate: 'Xác thực',
diff --git a/web/i18n/vi-VN/share-app.ts b/web/i18n/vi-VN/share-app.ts
index 5ca2dc55b50557..d440ad55dc5580 100644
--- a/web/i18n/vi-VN/share-app.ts
+++ b/web/i18n/vi-VN/share-app.ts
@@ -2,10 +2,10 @@ const translation = {
common: {
welcome: '',
appUnavailable: 'Ứng dụng không khả dụng',
- appUnkonwError: 'Ứng dụng không khả dụng',
+ appUnkonwError: 'Ứng dụng gặp lỗi không xác định',
},
chat: {
- newChat: 'Trò chuyện mới',
+ newChat: 'Cuộc trò chuyện mới',
pinnedTitle: 'Đã ghim',
unpinnedTitle: 'Trò chuyện',
newChatDefaultName: 'Cuộc trò chuyện mới',
@@ -15,58 +15,54 @@ const translation = {
privatePromptConfigTitle: 'Cài đặt cuộc trò chuyện',
publicPromptConfigTitle: 'Lời nhắc ban đầu',
configStatusDes: 'Trước khi bắt đầu, bạn có thể chỉnh sửa cài đặt cuộc trò chuyện',
- configDisabled:
- 'Cài đặt của phiên trước đã được sử dụng cho phiên này.',
+ configDisabled: 'Cài đặt của phiên trước đã được sử dụng cho phiên này.',
startChat: 'Bắt đầu trò chuyện',
- privacyPolicyLeft:
- 'Vui lòng đọc ',
- privacyPolicyMiddle:
- 'chính sách bảo mật',
- privacyPolicyRight:
- ' được cung cấp bởi nhà phát triển ứng dụng.',
+ privacyPolicyLeft: 'Vui lòng đọc ',
+ privacyPolicyMiddle: 'chính sách bảo mật',
+ privacyPolicyRight: ' được cung cấp bởi nhà phát triển ứng dụng.',
deleteConversation: {
title: 'Xóa cuộc trò chuyện',
content: 'Bạn có chắc muốn xóa cuộc trò chuyện này không?',
},
tryToSolve: 'Thử giải quyết',
- temporarySystemIssue: 'Xin lỗi, có sự cố tạm thời của hệ thống.',
+ temporarySystemIssue: 'Xin lỗi, hệ thống đang gặp sự cố tạm thời.',
},
generation: {
tabs: {
- create: 'Chạy Một lần',
- batch: 'Chạy Theo Lô',
- saved: 'Đã Lưu',
+ create: 'Tạo đơn lẻ',
+ batch: 'Tạo hàng loạt',
+ saved: 'Đã lưu',
},
savedNoData: {
title: 'Bạn chưa lưu kết quả nào!',
description: 'Bắt đầu tạo nội dung và tìm kết quả đã lưu của bạn ở đây.',
startCreateContent: 'Bắt đầu tạo nội dung',
},
- title: 'Hoàn Thiện AI',
+ title: 'Hoàn thiện AI',
queryTitle: 'Nội dung truy vấn',
completionResult: 'Kết quả hoàn thiện',
- queryPlaceholder: 'Viết nội dung truy vấn của bạn...',
+ queryPlaceholder: 'Nhập nội dung truy vấn của bạn...',
run: 'Thực thi',
copy: 'Sao chép',
- resultTitle: 'Hoàn Thiện AI',
- noData: 'AI sẽ đưa ra điều bạn muốn ở đây.',
+ resultTitle: 'Kết quả AI',
+ noData: 'AI sẽ hiển thị kết quả ở đây.',
csvUploadTitle: 'Kéo và thả tệp CSV của bạn vào đây, hoặc ',
- browse: 'duyệt',
+ browse: 'chọn tệp',
csvStructureTitle: 'Tệp CSV phải tuân thủ cấu trúc sau:',
downloadTemplate: 'Tải xuống mẫu tại đây',
field: 'Trường',
batchFailed: {
- info: '{{num}} thực thi thất bại',
+ info: '{{num}} lần thực thi thất bại',
retry: 'Thử lại',
outputPlaceholder: 'Không có nội dung đầu ra',
},
errorMsg: {
empty: 'Vui lòng nhập nội dung vào tệp đã tải lên.',
- fileStructNotMatch: 'Tệp CSV tải lên không khớp cấu trúc.',
- emptyLine: 'Hàng {{rowIndex}} trống',
- invalidLine: 'Hàng {{rowIndex}}: {{varName}} không thể để trống',
- moreThanMaxLengthLine: 'Hàng {{rowIndex}}: {{varName}} không thể chứa nhiều hơn {{maxLength}} ký tự',
- atLeastOne: 'Vui lòng nhập ít nhất một hàng vào tệp đã tải lên.',
+ fileStructNotMatch: 'Cấu trúc tệp CSV tải lên không khớp.',
+ emptyLine: 'Dòng {{rowIndex}} trống',
+ invalidLine: 'Dòng {{rowIndex}}: {{varName}} không thể để trống',
+ moreThanMaxLengthLine: 'Dòng {{rowIndex}}: {{varName}} không thể chứa quá {{maxLength}} ký tự',
+ atLeastOne: 'Vui lòng nhập ít nhất một dòng vào tệp đã tải lên.',
},
},
}
diff --git a/web/i18n/vi-VN/tools.ts b/web/i18n/vi-VN/tools.ts
index faf491d892d47f..40e16a5fa9d455 100644
--- a/web/i18n/vi-VN/tools.ts
+++ b/web/i18n/vi-VN/tools.ts
@@ -1,6 +1,6 @@
const translation = {
title: 'Công cụ',
- createCustomTool: 'Tạo Công cụ Tùy chỉnh',
+ createCustomTool: 'Tạo công cụ tùy chỉnh',
type: {
all: 'Tất cả',
builtIn: 'Tích hợp sẵn',
@@ -11,36 +11,36 @@ const translation = {
line2: 'đóng góp công cụ cho Dify.',
viewGuide: 'Xem hướng dẫn',
},
- author: 'Bởi',
+ author: 'Tác giả',
auth: {
unauthorized: 'Chưa xác thực',
authorized: 'Đã xác thực',
setup: 'Thiết lập xác thực để sử dụng',
- setupModalTitle: 'Thiết lập Xác thực',
- setupModalTitleDescription: 'Sau khi cấu hình thông tin đăng nhập, tất cả các thành viên trong không gian làm việc có thể sử dụng công cụ này khi triển khai ứng dụng.',
+ setupModalTitle: 'Thiết lập xác thực',
+ setupModalTitleDescription: 'Sau khi cấu hình thông tin đăng nhập, tất cả thành viên trong không gian làm việc có thể sử dụng công cụ này khi triển khai ứng dụng.',
},
includeToolNum: 'Bao gồm {{num}} công cụ',
- addTool: 'Thêm Công cụ',
+ addTool: 'Thêm công cụ',
createTool: {
- title: 'Tạo Công cụ Tùy chỉnh',
+ title: 'Tạo công cụ tùy chỉnh',
editAction: 'Cấu hình',
- editTitle: 'Chỉnh sửa Công cụ Tùy chỉnh',
+ editTitle: 'Chỉnh sửa công cụ tùy chỉnh',
name: 'Tên',
toolNamePlaceHolder: 'Nhập tên công cụ',
schema: 'Schema',
schemaPlaceHolder: 'Nhập schema OpenAPI của bạn vào đây',
- viewSchemaSpec: 'Xem Chi tiết OpenAPI-Swagger',
+ viewSchemaSpec: 'Xem chi tiết OpenAPI-Swagger',
importFromUrl: 'Nhập từ URL',
importFromUrlPlaceHolder: 'https://...',
urlError: 'Vui lòng nhập URL hợp lệ',
examples: 'Ví dụ',
exampleOptions: {
json: 'Thời tiết (JSON)',
- yaml: 'Pet Store (YAML)',
- blankTemplate: 'Mẫu Trống',
+ yaml: 'Cửa hàng thú cưng (YAML)',
+ blankTemplate: 'Mẫu trống',
},
availableTools: {
- title: 'Công cụ Hiện có',
+ title: 'Công cụ hiện có',
name: 'Tên',
description: 'Mô tả',
method: 'Phương thức',
@@ -49,9 +49,9 @@ const translation = {
test: 'Kiểm tra',
},
authMethod: {
- title: 'Phương thức Xác thực',
+ title: 'Phương thức xác thực',
type: 'Loại xác thực',
- keyTooltip: 'Khóa Tiêu đề HTTP, Bạn có thể để trống nếu không biết là gì hoặc đặt nó thành một giá trị tùy chỉnh',
+ keyTooltip: 'Khóa tiêu đề HTTP, bạn có thể để trống nếu không biết hoặc đặt một giá trị tùy chỉnh',
types: {
none: 'Không',
api_key: 'Khóa API',
@@ -62,7 +62,7 @@ const translation = {
value: 'Giá trị',
},
authHeaderPrefix: {
- title: 'Loại Xác thực',
+ title: 'Loại xác thực',
types: {
basic: 'Cơ bản',
bearer: 'Bearer',
@@ -71,21 +71,21 @@ const translation = {
},
privacyPolicy: 'Chính sách bảo mật',
privacyPolicyPlaceholder: 'Vui lòng nhập chính sách bảo mật',
- customDisclaimer: 'Tuyên bố Tùy chỉnh',
- customDisclaimerPlaceholder: 'Vui lòng nhập tuyên bố tùy chỉnh',
+ customDisclaimer: 'Tuyên bố từ chối trách nhiệm tùy chỉnh',
+ customDisclaimerPlaceholder: 'Vui lòng nhập tuyên bố từ chối trách nhiệm tùy chỉnh',
deleteToolConfirmTitle: 'Xóa công cụ này?',
- deleteToolConfirmContent: 'Xóa công cụ là không thể hồi tơi. Người dùng sẽ không thể truy cập lại công cụ của bạn.',
+ deleteToolConfirmContent: 'Xóa công cụ là không thể hoàn tác. Người dùng sẽ không thể truy cập lại công cụ của bạn.',
},
test: {
title: 'Kiểm tra',
parametersValue: 'Tham số & Giá trị',
parameters: 'Tham số',
value: 'Giá trị',
- testResult: 'Kết quả Kiểm tra',
+ testResult: 'Kết quả kiểm tra',
testResultPlaceholder: 'Kết quả kiểm tra sẽ hiển thị ở đây',
},
thought: {
- using: 'Sử dụng',
+ using: 'Đang sử dụng',
used: 'Đã sử dụng',
requestTitle: 'Yêu cầu đến',
responseTitle: 'Phản hồi từ',
@@ -93,7 +93,7 @@ const translation = {
setBuiltInTools: {
info: 'Thông tin',
setting: 'Cài đặt',
- toolDescription: 'Mô tả Công cụ',
+ toolDescription: 'Mô tả công cụ',
parameters: 'Tham số',
string: 'chuỗi',
number: 'số',
@@ -101,17 +101,17 @@ const translation = {
infoAndSetting: 'Thông tin & Cài đặt',
},
noCustomTool: {
- title: 'Không có công cụ tùy chỉnh!',
+ title: 'Chưa có công cụ tùy chỉnh!',
content: 'Thêm và quản lý các công cụ tùy chỉnh của bạn ở đây để xây dựng ứng dụng AI.',
- createTool: 'Tạo Công cụ',
+ createTool: 'Tạo công cụ',
},
noSearchRes: {
title: 'Xin lỗi, không có kết quả!',
- content: 'Chúng tôi không thể tìm thấy bất kỳ công cụ nào phù hợp với tìm kiếm của bạn.',
- reset: 'Thiết lập lại Tìm kiếm',
+ content: 'Chúng tôi không tìm thấy công cụ nào phù hợp với tìm kiếm của bạn.',
+ reset: 'Đặt lại tìm kiếm',
},
builtInPromptTitle: 'Lời nhắc',
- toolRemoved: 'Công cụ đã được loại bỏ',
+ toolRemoved: 'Công cụ đã bị xóa',
notAuthorized: 'Công cụ chưa được xác thực',
howToGet: 'Cách nhận',
}
diff --git a/web/i18n/vi-VN/workflow.ts b/web/i18n/vi-VN/workflow.ts
index 4d4a41d14f1aa7..cc65ed7aa763f9 100644
--- a/web/i18n/vi-VN/workflow.ts
+++ b/web/i18n/vi-VN/workflow.ts
@@ -2,34 +2,8 @@ const translation = {
common: {
undo: 'Hoàn tác',
redo: 'Làm lại',
- changeHistory: {
- title: 'Lịch sử thay đổi',
- placeholder: 'Bạn chưa thay đổi gì cả',
- clearHistory: 'Xóa lịch sử',
- hint: 'Gợi ý',
- hintText: 'Các hành động chỉnh sửa của bạn được theo dõi trong lịch sử thay đổi, được lưu trên thiết bị của bạn trong suốt phiên làm việc này. Lịch sử này sẽ bị xóa khi bạn thoát khỏi trình soạn thảo.',
- stepBackward_one: '{{count}} bước lùi',
- stepBackward_other: '{{count}} bước lùi',
- stepForward_one: '{{count}} bước tiến',
- stepForward_other: '{{count}} bước tiến',
- sessionStart: 'Bắt đầu phiên',
- currentState: 'Trạng thái hiện tại',
- nodeTitleChange: 'Tiêu đề khối đã thay đổi',
- nodeDescriptionChange: 'Mô tả khối đã thay đổi',
- nodeDragStop: 'Khối đã di chuyển',
- nodeChange: 'Khối đã thay đổi',
- nodeConnect: 'Khối đã kết nối',
- nodePaste: 'Khối đã dán',
- nodeDelete: 'Khối đã xóa',
- nodeAdd: 'Khối đã thêm',
- nodeResize: 'Khối đã thay đổi kích thước',
- noteAdd: 'Ghi chú đã thêm',
- noteChange: 'Ghi chú đã thay đổi',
- noteDelete: 'Ghi chú đã xóa',
- edgeDelete: 'Khối đã ngắt kết nối',
- },
- editing: 'Chỉnh sửa',
- autoSaved: 'Tự động lưu',
+ editing: 'Đang chỉnh sửa',
+ autoSaved: 'Đã tự động lưu',
unpublished: 'Chưa xuất bản',
published: 'Đã xuất bản',
publish: 'Xuất bản',
@@ -45,12 +19,12 @@ const translation = {
goBackToEdit: 'Quay lại trình chỉnh sửa',
conversationLog: 'Nhật ký cuộc trò chuyện',
features: 'Tính năng',
- debugAndPreview: 'Gỡ lỗi và xem trước',
+ debugAndPreview: 'Xem trước',
restart: 'Khởi động lại',
currentDraft: 'Bản nháp hiện tại',
currentDraftUnpublished: 'Bản nháp hiện tại chưa xuất bản',
latestPublished: 'Xuất bản mới nhất',
- publishedAt: 'Đã xuất bản tại',
+ publishedAt: 'Đã xuất bản lúc',
restore: 'Khôi phục',
runApp: 'Chạy ứng dụng',
batchRunApp: 'Chạy ứng dụng hàng loạt',
@@ -96,6 +70,53 @@ const translation = {
workflowAsToolTip: 'Cần cấu hình lại công cụ sau khi cập nhật quy trình làm việc.',
viewDetailInTracingPanel: 'Xem chi tiết',
},
+ env: {
+ envPanelTitle: 'Biến Môi Trường',
+ envDescription: 'Biến môi trường có thể được sử dụng để lưu trữ thông tin cá nhân và thông tin xác thực. Chúng chỉ được đọc và có thể được tách khỏi tệp DSL trong quá trình xuất.',
+ envPanelButton: 'Thêm Biến',
+ modal: {
+ title: 'Thêm Biến Môi Trường',
+ editTitle: 'Sửa Biến Môi Trường',
+ type: 'Loại',
+ name: 'Tên',
+ namePlaceholder: 'tên môi trường',
+ value: 'Giá trị',
+ valuePlaceholder: 'giá trị môi trường',
+ secretTip: 'Được sử dụng để xác định thông tin hoặc dữ liệu nhạy cảm, với cài đặt DSL được cấu hình để ngăn chặn rò rỉ.',
+ },
+ export: {
+ title: 'Xuất biến môi trường bí mật?',
+ checkbox: 'Xuất giá trị bí mật',
+ ignore: 'Xuất DSL',
+ export: 'Xuất DSL với giá trị bí mật',
+ },
+ },
+ changeHistory: {
+ title: 'Lịch sử thay đổi',
+ placeholder: 'Bạn chưa thay đổi gì cả',
+ clearHistory: 'Xóa lịch sử',
+ hint: 'Gợi ý',
+ hintText: 'Các hành động chỉnh sửa của bạn được theo dõi trong lịch sử thay đổi, được lưu trên thiết bị của bạn trong suốt phiên làm việc này. Lịch sử này sẽ bị xóa khi bạn thoát khỏi trình soạn thảo.',
+ stepBackward_one: '{{count}} bước lùi',
+ stepBackward_other: '{{count}} bước lùi',
+ stepForward_one: '{{count}} bước tiến',
+ stepForward_other: '{{count}} bước tiến',
+ sessionStart: 'Bắt đầu phiên',
+ currentState: 'Trạng thái hiện tại',
+ nodeTitleChange: 'Tiêu đề khối đã thay đổi',
+ nodeDescriptionChange: 'Mô tả khối đã thay đổi',
+ nodeDragStop: 'Khối đã di chuyển',
+ nodeChange: 'Khối đã thay đổi',
+ nodeConnect: 'Khối đã kết nối',
+ nodePaste: 'Khối đã dán',
+ nodeDelete: 'Khối đã xóa',
+ nodeAdd: 'Khối đã thêm',
+ nodeResize: 'Khối đã thay đổi kích thước',
+ noteAdd: 'Ghi chú đã thêm',
+ noteChange: 'Ghi chú đã thay đổi',
+ noteDelete: 'Ghi chú đã xóa',
+ edgeDelete: 'Khối đã ngắt kết nối',
+ },
errorMsg: {
fieldRequired: '{{field}} là bắt buộc',
authRequired: 'Yêu cầu xác thực',
@@ -175,7 +196,7 @@ const translation = {
userInputField: 'Trường đầu vào của người dùng',
changeBlock: 'Thay đổi khối',
helpLink: 'Liên kết trợ giúp',
- about: 'Về',
+ about: 'Giới thiệu',
createdBy: 'Tạo bởi ',
nextStep: 'Bước tiếp theo',
addNextStep: 'Thêm khối tiếp theo trong quy trình làm việc này',
diff --git a/web/i18n/zh-Hans/workflow.ts b/web/i18n/zh-Hans/workflow.ts
index 648c7c6891b9e0..eb75ba1db3ede0 100644
--- a/web/i18n/zh-Hans/workflow.ts
+++ b/web/i18n/zh-Hans/workflow.ts
@@ -99,6 +99,33 @@ const translation = {
export: '导出包含 Secret 值的 DSL',
},
},
+ chatVariable: {
+ panelTitle: '会话变量',
+ panelDescription: '会话变量用于存储 LLM 需要的上下文信息,如用户偏好、对话历史等。它是可读写的。',
+ docLink: '查看文档了解更多。',
+ button: '添加变量',
+ modal: {
+ title: '添加会话变量',
+ editTitle: '编辑会话变量',
+ name: '名称',
+ namePlaceholder: '变量名',
+ type: '类型',
+ value: '默认值',
+ valuePlaceholder: '默认值,可以为空',
+ description: '描述',
+ descriptionPlaceholder: '变量的描述',
+ editInJSON: '在 JSON 中编辑',
+ oneByOne: '逐个添加',
+ editInForm: '在表单中编辑',
+ arrayValue: '值',
+ addArrayValue: '添加值',
+ objectKey: '属性',
+ objectType: '类型',
+ objectValue: '默认值',
+ },
+ storedContent: '存储内容',
+ updatedAt: '更新时间 ',
+ },
changeHistory: {
title: '变更历史',
placeholder: '尚未更改任何内容',
@@ -149,6 +176,7 @@ const translation = {
tabs: {
'searchBlock': '搜索节点',
'blocks': '节点',
+ 'searchTool': '搜索工具',
'tools': '工具',
'allTool': '全部',
'builtInTool': '内置',
@@ -173,6 +201,7 @@ const translation = {
'http-request': 'HTTP 请求',
'variable-assigner': '变量聚合器',
'variable-aggregator': '变量聚合器',
+ 'assigner': '变量赋值',
'iteration-start': '迭代开始',
'iteration': '迭代',
'parameter-extractor': '参数提取器',
@@ -189,6 +218,7 @@ const translation = {
'template-transform': '使用 Jinja 模板语法将数据转换为字符串',
'http-request': '允许通过 HTTP 协议发送服务器请求',
'variable-assigner': '将多路分支的变量聚合为一个变量,以实现下游节点统一配置。',
+ 'assigner': '变量赋值节点用于向可写入变量(例如会话变量)进行变量赋值。',
'variable-aggregator': '将多路分支的变量聚合为一个变量,以实现下游节点统一配置。',
'iteration': '对列表对象执行多次步骤直至输出所有结果。',
'parameter-extractor': '利用 LLM 从自然语言内推理提取出结构化参数,用于后置的工具调用或 HTTP 请求。',
@@ -215,6 +245,7 @@ const translation = {
checklistResolved: '所有问题均已解决',
organizeBlocks: '整理节点',
change: '更改',
+ optional: '(选填)',
},
nodes: {
common: {
@@ -406,6 +437,17 @@ const translation = {
},
setAssignVariable: '设置赋值变量',
},
+ assigner: {
+ 'assignedVariable': '赋值的变量',
+ 'writeMode': '写入模式',
+ 'writeModeTip': '赋值的变量为数组时,可添加变量至数组末尾。',
+ 'over-write': '覆盖',
+ 'append': '追加',
+ 'plus': '加',
+ 'clear': '清空',
+ 'setVariable': '设置变量',
+ 'variable': '变量',
+ },
tool: {
toAuthorize: '授权',
inputVars: '输入变量',
diff --git a/web/i18n/zh-Hant/app.ts b/web/i18n/zh-Hant/app.ts
index dfdc0ea7af853a..3c388065e75eda 100644
--- a/web/i18n/zh-Hant/app.ts
+++ b/web/i18n/zh-Hant/app.ts
@@ -84,6 +84,42 @@ const translation = {
workflow: '工作流',
completion: '文字生成',
},
+ tracing: {
+ title: '追蹤應用程式效能',
+ description: '配置第三方LLMOps提供商並追蹤應用程式效能。',
+ config: '配置',
+ collapse: '收起',
+ expand: '展開',
+ tracing: '追蹤',
+ disabled: '已禁用',
+ disabledTip: '請先配置提供商',
+ enabled: '服務中',
+ tracingDescription: '捕獲應用程式執行的完整上下文,包括LLM調用、上下文、提示、HTTP請求等,到第三方追蹤平台。',
+ configProviderTitle: {
+ configured: '已配置',
+ notConfigured: '配置提供商以啟用追蹤',
+ moreProvider: '更多提供商',
+ },
+ langsmith: {
+ title: 'LangSmith',
+ description: '一個全方位的開發者平台,用於LLM驅動的應用程式生命週期的每個步驟。',
+ },
+ langfuse: {
+ title: 'Langfuse',
+ description: '追蹤、評估、提示管理和指標,用於調試和改進您的LLM應用程式。',
+ },
+ inUse: '使用中',
+ configProvider: {
+ title: '配置 ',
+ placeholder: '輸入您的{{key}}',
+ project: '專案',
+ publicKey: '公鑰',
+ secretKey: '密鑰',
+ viewDocsLink: '查看{{key}}文檔',
+ removeConfirmTitle: '移除{{key}}配置?',
+ removeConfirmContent: '當前配置正在使用中,移除它將關閉追蹤功能。',
+ },
+ },
}
export default translation
diff --git a/web/i18n/zh-Hant/common.ts b/web/i18n/zh-Hant/common.ts
index 78c34d33513f87..f4d6952f76ce10 100644
--- a/web/i18n/zh-Hant/common.ts
+++ b/web/i18n/zh-Hant/common.ts
@@ -12,6 +12,7 @@ const translation = {
cancel: '取消',
clear: '清空',
save: '儲存',
+ saveAndEnable: '儲存並啟用',
edit: '編輯',
add: '新增',
added: '已新增',
@@ -408,7 +409,7 @@ const translation = {
latestAvailable: 'Dify {{version}} 已是最新版本。',
},
appMenus: {
- overview: '概覽',
+ overview: '監控',
promptEng: '編排',
apiAccess: '訪問 API',
logAndAnn: '日誌與標註',
diff --git a/web/i18n/zh-Hant/workflow.ts b/web/i18n/zh-Hant/workflow.ts
index a83fae330e8379..1e73788f168f4d 100644
--- a/web/i18n/zh-Hant/workflow.ts
+++ b/web/i18n/zh-Hant/workflow.ts
@@ -19,7 +19,7 @@ const translation = {
goBackToEdit: '返回編輯模式',
conversationLog: '對話記錄',
features: '功能',
- debugAndPreview: '調試和預覽',
+ debugAndPreview: '預覽',
restart: '重新開始',
currentDraft: '當前草稿',
currentDraftUnpublished: '當前草稿未發佈',
@@ -70,6 +70,27 @@ const translation = {
workflowAsToolTip: '工作流更新後需要重新配置工具參數',
viewDetailInTracingPanel: '查看詳細信息',
},
+ env: {
+ envPanelTitle: '環境變數',
+ envDescription: '環境變數可用於存儲私人信息和憑證。它們是唯讀的,並且可以在導出時與DSL文件分開。',
+ envPanelButton: '添加變數',
+ modal: {
+ title: '添加環境變數',
+ editTitle: '編輯環境變數',
+ type: '類型',
+ name: '名稱',
+ namePlaceholder: '環境名稱',
+ value: '值',
+ valuePlaceholder: '環境值',
+ secretTip: '用於定義敏感信息或數據,DSL設置配置為防止洩露。',
+ },
+ export: {
+ title: '導出機密環境變數?',
+ checkbox: '導出機密值',
+ ignore: '導出DSL',
+ export: '導出帶有機密值的DSL',
+ },
+ },
changeHistory: {
title: '變更履歷',
placeholder: '尚未更改任何內容',
diff --git a/web/models/log.ts b/web/models/log.ts
index 994da445dcf9d1..fbd4674c9b0f86 100644
--- a/web/models/log.ts
+++ b/web/models/log.ts
@@ -4,6 +4,7 @@ import type {
Edge,
Node,
} from '@/app/components/workflow/types'
+import type { Metadata } from '@/app/components/base/chat/chat/type'
// Log type contains key:string conversation_id:string created_at:string quesiton:string answer:string
export type Conversation = {
@@ -102,6 +103,7 @@ export type MessageContent = {
from_end_user_id?: string
}>
message_files: VisionFile[]
+ metadata: Metadata
agent_thoughts: any[] // TODO
workflow_run_id: string
}
diff --git a/web/package.json b/web/package.json
index 2d6cd0a511e565..9b8e50885c22c4 100644
--- a/web/package.json
+++ b/web/package.json
@@ -1,6 +1,6 @@
{
"name": "dify-web",
- "version": "0.6.16",
+ "version": "0.7.0",
"private": true,
"engines": {
"node": ">=18.17.0"
diff --git a/web/service/base.ts b/web/service/base.ts
index 7d9aac5ba2a454..bda83f1c8eb12c 100644
--- a/web/service/base.ts
+++ b/web/service/base.ts
@@ -538,14 +538,15 @@ export const ssePost = (
return handleStream(res, (str: string, isFirstMessage: boolean, moreInfo: IOnDataMoreInfo) => {
if (moreInfo.errorMessage) {
onError?.(moreInfo.errorMessage, moreInfo.errorCode)
- if (moreInfo.errorMessage !== 'AbortError: The user aborted a request.')
+ // TypeError: Cannot assign to read only property ... will happen in page leave, so it should be ignored.
+ if (moreInfo.errorMessage !== 'AbortError: The user aborted a request.' && !moreInfo.errorMessage.includes('TypeError: Cannot assign to read only property'))
Toast.notify({ type: 'error', message: moreInfo.errorMessage })
return
}
onData?.(str, isFirstMessage, moreInfo)
}, onCompleted, onThought, onMessageEnd, onMessageReplace, onFile, onWorkflowStarted, onWorkflowFinished, onNodeStarted, onNodeFinished, onIterationStart, onIterationNext, onIterationFinish, onTextChunk, onTTSChunk, onTTSEnd, onTextReplace)
}).catch((e) => {
- if (e.toString() !== 'AbortError: The user aborted a request.')
+ if (e.toString() !== 'AbortError: The user aborted a request.' && !e.toString().includes('TypeError: Cannot assign to read only property'))
Toast.notify({ type: 'error', message: e })
onError?.(e)
})
diff --git a/web/service/workflow.ts b/web/service/workflow.ts
index 1b805dff4f0b13..93ab0006d444b6 100644
--- a/web/service/workflow.ts
+++ b/web/service/workflow.ts
@@ -3,6 +3,7 @@ import { get, post } from './base'
import type { CommonResponse } from '@/models/common'
import type {
ChatRunHistoryResponse,
+ ConversationVariableResponse,
FetchWorkflowDraftResponse,
NodesDefaultConfigsResponse,
WorkflowRunHistoryResponse,
@@ -13,7 +14,7 @@ export const fetchWorkflowDraft = (url: string) => {
return get(url, {}, { silent: true }) as Promise<FetchWorkflowDraftResponse>
}
-export const syncWorkflowDraft = ({ url, params }: { url: string; params: Pick<FetchWorkflowDraftResponse, 'graph' | 'features' | 'environment_variables'> }) => {
+export const syncWorkflowDraft = ({ url, params }: { url: string; params: Pick<FetchWorkflowDraftResponse, 'graph' | 'features' | 'environment_variables' | 'conversation_variables'> }) => {
return post(url, { body: params }, { silent: true })
}
@@ -58,3 +59,7 @@ export const fetchNodeDefault = (appId: string, blockType: BlockEnum, query = {}
export const updateWorkflowDraftFromDSL = (appId: string, data: string) => {
return post(`apps/${appId}/workflows/draft/import`, { body: { data } })
}
+
+export const fetchCurrentValueOfConversationVariable: Fetcher<ConversationVariableResponse, { url: string; params: { conversation_id: string } }> = ({ url, params }) => {
+ return get<ConversationVariableResponse>(url, { params })
+}
diff --git a/web/types/workflow.ts b/web/types/workflow.ts
index 35c1bd2791436e..f7991bc4e09c91 100644
--- a/web/types/workflow.ts
+++ b/web/types/workflow.ts
@@ -1,6 +1,7 @@
import type { Viewport } from 'reactflow'
import type {
BlockEnum,
+ ConversationVariable,
Edge,
EnvironmentVariable,
Node,
@@ -23,7 +24,8 @@ export type NodeTracing = {
total_tokens: number
total_price: number
currency: string
- steps_boundary: number[]
+ iteration_id?: string
+ iteration_index?: number
}
metadata: {
iterator_length: number
@@ -58,6 +60,7 @@ export type FetchWorkflowDraftResponse = {
updated_at: number
tool_published: boolean
environment_variables?: EnvironmentVariable[]
+ conversation_variables?: ConversationVariable[]
}
export type NodeTracingListResponse = {
@@ -240,3 +243,11 @@ export type NodesDefaultConfigsResponse = {
type: string
config: any
}[]
+
+export type ConversationVariableResponse = {
+ data: (ConversationVariable & { updated_at: number; created_at: number })[]
+ has_more: boolean
+ limit: number
+ total: number
+ page: number
+}
diff --git a/web/utils/emoji.ts b/web/utils/emoji.ts
new file mode 100644
index 00000000000000..9123f780f25bcd
--- /dev/null
+++ b/web/utils/emoji.ts
@@ -0,0 +1,11 @@
+import { SearchIndex } from 'emoji-mart'
+import type { Emoji } from '@emoji-mart/data'
+
+export async function searchEmoji(value: string) {
+ const emojis: Emoji[] = await SearchIndex.search(value) || []
+
+ const results = emojis.map((emoji) => {
+ return emoji.skins[0].native
+ })
+ return results
+}