Change default OpenAI model for baibot (gpt-4.1 -> gpt-5)

Ref: https://openai.com/index/introducing-gpt-5/
Author: Slavi Pantaleev
Date:   2025-08-08 07:20:18 +03:00
Parent: 593fbd74de
Commit: 389118760f


@@ -368,16 +368,16 @@ matrix_bot_baibot_config_agents_static_definitions_openai_config_api_key: ""
 matrix_bot_baibot_config_agents_static_definitions_openai_config_text_generation_enabled: true
 # For valid model choices, see: https://platform.openai.com/docs/models
-matrix_bot_baibot_config_agents_static_definitions_openai_config_text_generation_model_id: gpt-4.1
+matrix_bot_baibot_config_agents_static_definitions_openai_config_text_generation_model_id: gpt-5
 # The prompt text to use (can be null or empty to not use a prompt).
 # See: https://huggingface.co/docs/transformers/en/tasks/prompting
 matrix_bot_baibot_config_agents_static_definitions_openai_config_text_generation_prompt: "{{ matrix_bot_baibot_config_agents_static_definitions_prompt }}"
 # The temperature parameter controls the randomness of the generated text.
 # See: https://blogs.novita.ai/what-are-large-language-model-settings-temperature-top-p-and-max-tokens/#what-is-llm-temperature
 matrix_bot_baibot_config_agents_static_definitions_openai_config_text_generation_temperature: 1.0
-matrix_bot_baibot_config_agents_static_definitions_openai_config_text_generation_max_response_tokens: 16384
-matrix_bot_baibot_config_agents_static_definitions_openai_config_text_generation_max_completion_tokens: ~
-matrix_bot_baibot_config_agents_static_definitions_openai_config_text_generation_max_context_tokens: 128000
+matrix_bot_baibot_config_agents_static_definitions_openai_config_text_generation_max_response_tokens: ~
+matrix_bot_baibot_config_agents_static_definitions_openai_config_text_generation_max_completion_tokens: 128000
+matrix_bot_baibot_config_agents_static_definitions_openai_config_text_generation_max_context_tokens: 400000
 matrix_bot_baibot_config_agents_static_definitions_openai_config_speech_to_text_enabled: true
 matrix_bot_baibot_config_agents_static_definitions_openai_config_speech_to_text_model_id: whisper-1
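
If you prefer not to move to the new default, the same variables can be overridden in your own vars file. A minimal sketch, assuming the playbook's usual inventory/host_vars/matrix.example.com/vars.yml override mechanism, using the pre-change values shown on the left side of the diff above:

# Pin the previous OpenAI text-generation defaults for baibot
matrix_bot_baibot_config_agents_static_definitions_openai_config_text_generation_model_id: gpt-4.1
matrix_bot_baibot_config_agents_static_definitions_openai_config_text_generation_max_response_tokens: 16384
matrix_bot_baibot_config_agents_static_definitions_openai_config_text_generation_max_completion_tokens: ~
matrix_bot_baibot_config_agents_static_definitions_openai_config_text_generation_max_context_tokens: 128000

Re-running the playbook afterwards applies these overrides on top of the role's new defaults.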