diff --git a/api/core/model_runtime/model_providers/gitee_ai/rerank/rerank.py b/api/core/model_runtime/model_providers/gitee_ai/rerank/rerank.py
index 231345c2f4e231..832ba927406c4c 100644
--- a/api/core/model_runtime/model_providers/gitee_ai/rerank/rerank.py
+++ b/api/core/model_runtime/model_providers/gitee_ai/rerank/rerank.py
@@ -122,7 +122,7 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode
             label=I18nObject(en_US=model),
             model_type=ModelType.RERANK,
             fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
-            model_properties={ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size"))},
+            model_properties={ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", 512))},
         )

         return entity
diff --git a/api/core/model_runtime/model_providers/gpustack/rerank/rerank.py b/api/core/model_runtime/model_providers/gpustack/rerank/rerank.py
index 5ea7532564098d..feb57770285e4e 100644
--- a/api/core/model_runtime/model_providers/gpustack/rerank/rerank.py
+++ b/api/core/model_runtime/model_providers/gpustack/rerank/rerank.py
@@ -140,7 +140,7 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode
             label=I18nObject(en_US=model),
             model_type=ModelType.RERANK,
             fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
-            model_properties={ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size"))},
+            model_properties={ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", 512))},
         )

         return entity
diff --git a/api/core/model_runtime/model_providers/jina/rerank/rerank.py b/api/core/model_runtime/model_providers/jina/rerank/rerank.py
index aacc8e75d3ad07..22f882be6bdc4b 100644
--- a/api/core/model_runtime/model_providers/jina/rerank/rerank.py
+++ b/api/core/model_runtime/model_providers/jina/rerank/rerank.py
@@ -128,7 +128,7 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode
             label=I18nObject(en_US=model),
             model_type=ModelType.RERANK,
             fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
-            model_properties={ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size"))},
+            model_properties={ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", 8000))},
         )

         return entity
diff --git a/api/core/model_runtime/model_providers/jina/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/jina/text_embedding/text_embedding.py
index 49c558f4a44ffa..f5be7a98289a3a 100644
--- a/api/core/model_runtime/model_providers/jina/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/jina/text_embedding/text_embedding.py
@@ -193,7 +193,7 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode
             label=I18nObject(en_US=model),
             model_type=ModelType.TEXT_EMBEDDING,
             fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
-            model_properties={ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size"))},
+            model_properties={ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", 8000))},
         )

         return entity
diff --git a/api/core/model_runtime/model_providers/ollama/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/ollama/text_embedding/text_embedding.py
index a16c91cd7ef81e..83c4facc8db76c 100644
--- a/api/core/model_runtime/model_providers/ollama/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/ollama/text_embedding/text_embedding.py
@@ -139,7 +139,7 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode
             model_type=ModelType.TEXT_EMBEDDING,
             fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
             model_properties={
-                ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size")),
+                ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", 512)),
                 ModelPropertyKey.MAX_CHUNKS: 1,
             },
             parameter_rules=[],
diff --git a/api/core/model_runtime/model_providers/openai_api_compatible/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/openai_api_compatible/text_embedding/text_embedding.py
index c2b7297aac596e..793c384d5a8079 100644
--- a/api/core/model_runtime/model_providers/openai_api_compatible/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/openai_api_compatible/text_embedding/text_embedding.py
@@ -176,7 +176,7 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode
             model_type=ModelType.TEXT_EMBEDDING,
             fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
             model_properties={
-                ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size")),
+                ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", 512)),
                 ModelPropertyKey.MAX_CHUNKS: 1,
             },
             parameter_rules=[],
diff --git a/api/core/model_runtime/model_providers/perfxcloud/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/perfxcloud/text_embedding/text_embedding.py
index d78bdaa75e5423..7bbd31e87c595d 100644
--- a/api/core/model_runtime/model_providers/perfxcloud/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/perfxcloud/text_embedding/text_embedding.py
@@ -182,7 +182,7 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode
             model_type=ModelType.TEXT_EMBEDDING,
             fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
             model_properties={
-                ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size")),
+                ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", 512)),
                 ModelPropertyKey.MAX_CHUNKS: 1,
             },
             parameter_rules=[],
diff --git a/api/core/model_runtime/model_providers/vertex_ai/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/vertex_ai/text_embedding/text_embedding.py
index 43233e61262264..9cd0c78d99df24 100644
--- a/api/core/model_runtime/model_providers/vertex_ai/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/vertex_ai/text_embedding/text_embedding.py
@@ -173,7 +173,7 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode
             model_type=ModelType.TEXT_EMBEDDING,
             fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
             model_properties={
-                ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size")),
+                ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", 512)),
                 ModelPropertyKey.MAX_CHUNKS: 1,
             },
             parameter_rules=[],
diff --git a/api/core/model_runtime/model_providers/voyage/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/voyage/text_embedding/text_embedding.py
index e69c9fccba97ed..16f1bd43d8820d 100644
--- a/api/core/model_runtime/model_providers/voyage/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/voyage/text_embedding/text_embedding.py
@@ -166,7 +166,7 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode
             label=I18nObject(en_US=model),
             model_type=ModelType.TEXT_EMBEDDING,
             fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
-            model_properties={ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size"))},
+            model_properties={ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", 512))},
        )

         return entity
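
For reference, a minimal standalone sketch of the failure mode these hunks guard against (not part of the diff; the empty credentials dict below is hypothetical): when the context_size key is missing from the customizable-model credentials, dict.get returns None, int(None) raises TypeError, and get_customizable_model_schema fails. Supplying a fallback default (512 or 8000 in the hunks above, depending on the provider) keeps schema construction working.

# Sketch only: demonstrates why the diff adds a default to credentials.get().
credentials: dict = {}  # hypothetical payload with no "context_size" entry

# Old behavior: dict.get returns None, so int() raises TypeError.
try:
    int(credentials.get("context_size"))
except TypeError as exc:
    print(f"without a default: {exc}")

# New behavior: fall back to a provider-appropriate default, as in the diff.
context_size = int(credentials.get("context_size", 512))
print(f"with a default: context_size={context_size}")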