Skip to content

Commit

Permalink
cut down openai API-compatible model config options; fix gemini bugs
Browse files Browse the repository at this point in the history
  • Loading branch information
guchenhe committed Dec 28, 2023
1 parent c8ef7dc commit e08c13d
Show file tree
Hide file tree
Showing 4 changed files with 14 additions and 273 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ model_type: llm
features:
- vision
model_properties:
mode: completion
mode: chat
context_size: 12288
parameter_rules:
- name: temperature
Expand Down
5 changes: 5 additions & 0 deletions api/core/model_runtime/model_providers/google/llm/llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -236,7 +236,12 @@ def _convert_one_message_to_text(self, message: PromptMessage) -> str:
"""
human_prompt = "\n\nuser:"
ai_prompt = "\n\nmodel:"

content = message.content
if isinstance(content, list):
content = "".join(
c.data for c in content if c.type != PromptMessageContentType.IMAGE
)

if isinstance(message, UserPromptMessage):
message_text = f"{human_prompt} {content}"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -78,6 +78,8 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode
label=I18nObject(en_US="Top K"),
type=ParameterType.INT,
default=int(credentials.get('top_k', 1)),
min=1,
max=100
),
ParameterRule(
name=DefaultParameterName.FREQUENCY_PENALTY.value,
Expand All @@ -99,9 +101,9 @@ def get_customizable_model_schema(self, model: str, credentials: dict) -> AIMode
name=DefaultParameterName.MAX_TOKENS.value,
label=I18nObject(en_US="Max Tokens"),
type=ParameterType.INT,
default=int(credentials.get('max_tokens_to_sample', 1024)),
default=1024,
min=1,
max=4096
max=int(credentials.get('max_tokens_to_sample', 1024)),
)
],
pricing=PriceConfig(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,27 +18,11 @@ model_credential_schema:
en_US: Enter full model name
zh_Hans: 输入模型全称
credential_form_schemas:
- variable: require_key
label:
en_US: Does this model need an API key?
zh_Hans: 模型是否需要 API Key
type: radio
required: true
default: false
options:
- value: false
label:
en_US: "No"
zh_Hans: 不需要
- value: true
label:
en_US: "Yes"
zh_Hans: "需要"
- variable: api_key
label:
en_US: API Key
type: secret-input
required: true
required: false
placeholder:
zh_Hans: 在此输入您的 API Key
en_US: Enter your API Key
Expand All @@ -61,262 +45,12 @@ model_credential_schema:
placeholder:
zh_Hans: 在此输入您的模型上下文长度
en_US: Enter your Model context size
- variable: max_chunks
label:
zh_Hans: Embedding 模型段落上限
en_US: Max chunks for embedding models
show_on:
- variable: __model_type
value: text-embedding
type: text-input
default: '1'
placeholder:
zh_Hans: 在此输入您的 Embedding 模型段落上限
en_US: Enter maximum chunks allowed by model
- variable: require_temperature
label:
en_US: Does the model support adjusting temperature?
zh_Hans: 模型是否支持温度参数
type: radio
required: true
default: false
options:
- value: false
label:
en_US: "No"
zh_Hans: 不支持
- value: true
label:
en_US: "Yes"
zh_Hans: "支持"
show_on:
- variable: __model_type
value: llm
- variable: temperature
label:
zh_Hans: 模型温度 (在 0-2 之间)
en_US: Temperature (between 0-2)
show_on:
- variable: __model_type
value: llm
- variable: require_temperature
value: true
default: '0.7'
type: text-input
- variable: require_top_p
label:
en_US: Does the model support adjusting Top P?
zh_Hans: 模型是否支持 Top P 参数
type: radio
required: true
default: false
options:
- value: false
label:
en_US: "No"
zh_Hans: 不支持
- value: true
label:
en_US: "Yes"
zh_Hans: "支持"
show_on:
- variable: __model_type
value: llm
- variable: top_p
label:
zh_Hans: Top P (在 0-1 之间)
en_US: Top P (between 0-1)
show_on:
- variable: __model_type
value: llm
- variable: require_top_p
value: true
default: '1'
type: text-input
- variable: require_top_k
label:
en_US: Does the model support adjusting Top K?
zh_Hans: 模型是否支持 Top K 参数
type: radio
required: true
default: false
options:
- value: false
label:
en_US: "No"
zh_Hans: 不支持
- value: true
label:
en_US: "Yes"
zh_Hans: "支持"
show_on:
- variable: __model_type
value: llm
- variable: top_k
label:
zh_Hans: 取样数量
en_US: Top k
type: text-input
help:
zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
en_US: Only sample from the top K options for each subsequent token.
show_on:
- variable: __model_type
value: llm
default: '1'
- variable: require_frequency_penalty
label:
en_US: Does the model support adjusting Frequency Penalty?
zh_Hans: 模型是否支持 Frequency Penalty 参数
type: radio
required: true
default: false
options:
- value: false
label:
en_US: "No"
zh_Hans: 不支持
- value: true
label:
en_US: "Yes"
zh_Hans: "支持"
show_on:
- variable: __model_type
value: llm
- variable: frequency_penalty
label:
zh_Hans: 频率惩罚 (在 0 ± 2 之间)
en_US: Frequency Penalty (between 0 ± 2)
show_on:
- variable: __model_type
value: llm
- variable: require_frequency_penalty
value: true
default: '0'
type: text-input
- variable: require_presence_penalty
label:
en_US: Does the model support adjusting Presence Penalty?
zh_Hans: 模型是否支持 Presence Penalty 参数
type: radio
required: true
default: false
options:
- value: false
label:
en_US: "No"
zh_Hans: 不支持
- value: true
label:
en_US: "Yes"
zh_Hans: "支持"
show_on:
- variable: __model_type
value: llm
- variable: presence_penalty
label:
zh_Hans: 存在惩罚 (在 0 ± 2 之间)
en_US: Presence Penalty (between 0 ± 2)
show_on:
- variable: __model_type
value: llm
- variable: require_presence_penalty
value: true
default: '0'
type: text-input
- variable: require_max_token
label:
en_US: Does the model support adjusting max tokens?
zh_Hans: 模型是否支持设置最大 Token 数
type: radio
required: true
default: false
options:
- value: false
label:
en_US: "No"
zh_Hans: 不支持
- value: true
label:
en_US: "Yes"
zh_Hans: "支持"
show_on:
- variable: __model_type
value: llm
- variable: max_tokens_to_sample
label:
zh_Hans: 最大 token
en_US: Maximum tokens to sample
zh_Hans: 最大 token 上限
en_US: Upper bound for max tokens
show_on:
- variable: __model_type
value: llm
- variable: require_max_token
value: true
default: '4096'
type: text-input
- variable: require_pricing_config
label:
zh_Hans: 是否需要设置价格估计
en_US: Do you need price estimation based on token use?
required: true
default: false
type: radio
options:
- value: false
label:
en_US: "No"
zh_Hans: "不需要"
- value: true
label:
en_US: "Yes"
zh_Hans: "需要"
- variable: input_price
label:
zh_Hans: 输入单价
en_US: input price
default: '0.001'
help:
zh_Hans: 模型输入 token 单价。
en_US: Price per token input.
type: text-input
show_on:
- variable: require_pricing_config
value: true
- variable: output_price
label:
zh_Hans: 输出单价
en_US: output price
label:
zh_Hans: 最大 token 数
en_US: Maximum tokens to sample
default: '0.002'
show_on:
- variable: __model_type
value: llm
- variable: require_pricing_config
value: true
help:
zh_Hans: 模型输出 token 单价。
en_US: Price per token output.
type: text-input
- variable: unit
label:
zh_Hans: 价格单位
en_US: pricing unit
type: text-input
default: '0.001'
help:
zh_Hans: 价格计算单位。
en_US: Price unit for calculation.
show_on:
- variable: require_pricing_config
value: true
- variable: currency
label:
zh_Hans: 币种
en_US: Currency
default: USD
type: text-input
show_on:
- variable: require_pricing_config
value: true
type: text-input

0 comments on commit e08c13d

Please sign in to comment.