From 8889b018f3b88e8e59ee65de9e0712573d0a110e Mon Sep 17 00:00:00 2001
From: Slavi Pantaleev
Date: Sat, 1 Feb 2025 07:56:06 +0200
Subject: [PATCH] Adjust baibot's openai-config.yml.j2 to avoid
 `max_response_tokens` if unspecified

Reasoning models like `o1` and `o3` and their `-mini` variants report
errors if we try to configure `max_response_tokens` (which ultimately
influences the `max_tokens` field in the API request):

> invalid_request_error: Unsupported parameter: 'max_tokens' is not
> supported with this model. Use 'max_completion_tokens' instead.
> (param: max_tokens) (code: unsupported_parameter)

`max_completion_tokens` is not yet supported by baibot, so the best we
can do is at least get rid of `max_response_tokens` (`max_tokens`).

Ref: https://github.com/etkecc/baibot/commit/db9422740ceca32956d9628b6326b8be206344e2
---
 .../matrix-bot-baibot/templates/provider/openai-config.yml.j2 | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/roles/custom/matrix-bot-baibot/templates/provider/openai-config.yml.j2 b/roles/custom/matrix-bot-baibot/templates/provider/openai-config.yml.j2
index 56cf47fdd..c239e6d4a 100644
--- a/roles/custom/matrix-bot-baibot/templates/provider/openai-config.yml.j2
+++ b/roles/custom/matrix-bot-baibot/templates/provider/openai-config.yml.j2
@@ -8,7 +8,9 @@ text_generation:
   model_id: {{ matrix_bot_baibot_config_agents_static_definitions_openai_config_text_generation_model_id | to_json }}
   prompt: {{ matrix_bot_baibot_config_agents_static_definitions_openai_config_text_generation_prompt | to_json }}
   temperature: {{ matrix_bot_baibot_config_agents_static_definitions_openai_config_text_generation_temperature | to_json }}
+  {% if matrix_bot_baibot_config_agents_static_definitions_openai_config_text_generation_max_response_tokens %}
   max_response_tokens: {{ matrix_bot_baibot_config_agents_static_definitions_openai_config_text_generation_max_response_tokens | int | to_json }}
+  {% endif %}
   max_context_tokens: {{ matrix_bot_baibot_config_agents_static_definitions_openai_config_text_generation_max_context_tokens | int | to_json }}
 {% endif %}
 
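
Illustration (not part of the patch): a minimal sketch of the rendered
`text_generation` section of openai-config.yml under this change, using
hypothetical values for the role's variables. With
`matrix_bot_baibot_config_agents_static_definitions_openai_config_text_generation_max_response_tokens`
set to e.g. 16384, the template renders the key as before:

  text_generation:
    model_id: "gpt-4o"
    prompt: "You are a helpful assistant."
    temperature: 1.0
    max_response_tokens: 16384
    max_context_tokens: 128000

With the variable left empty, the new `{% if %}` guard drops the key
entirely:

  text_generation:
    model_id: "o1-mini"
    prompt: "You are a helpful assistant."
    temperature: 1.0
    max_context_tokens: 128000

Per the referenced baibot commit, an absent `max_response_tokens` means
baibot omits `max_tokens` from the API request, which is what lets
reasoning models like `o1`/`o3` accept the request.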