我本地xubunt24.04已经安装opencode(https://github.com/anom...
作成日: 2026年1月31日
使用モデル GPT-5.2 Thinking by Chat01
我本地xubunt24.04已经安装opencode(https://github.com/anomalyco/opencode/),已经配置了claude sonnet和gpt5.2,请给出gemini3模型models中的options如何配置,部分配置内容如下
{
"$schema":"https://opencode.ai/config.json",
"provider":{
"Elysiz":{
"npm":"@ai-sdk/openai-compatible",
"name":"Elysiz",
"options":{
"baseURL":"https://elysia.h-e.top/v1",
"apiKey":"sk-SSEr9Vxk9rOA50xZudDoWmm6SOxuQikG"
},
"models":{
"claude-4.5-sonnet-real":{
"name":"claude-4.5-sonnet-real",
"attachment":true,
"reasoning":true,
"tool_call":true,
"temperature":true,
"modalities":{
"input":[
"text",
"image"
],
"output":[
"text"
]
},
"cost":{
"input":3,
"output":15,
"cache_read":0.3,
"cache_write":3.75
},
"limit":{
"context":200000,
"output":64000
},
"options":{
"thinking":{
"type":"enabled",
"budgetTokens":32000
}
}
},
"claude-4.5-sonnet":{
"name":"claude-4.5-sonnet",
"attachment":true,
"reasoning":true,
"tool_call":true,
"temperature":true,
"modalities":{
"input":[
"text",
"image"
],
"output":[
"text"
]
},
"cost":{
"input":3,
"output":15,
"cache_read":0.3,
"cache_write":3.75
},
"limit":{
"context":200000,
"output":64000
},
"options":{
"thinking":{
"type":"enabled",
"budgetTokens":32000
}
}
},
"gemini-3-pro-preview":{
"name":"gemini-3-pro-preview",
"thinking":true
},
"gpt-5.2":{
"name":"gpt-5.2",
"attachment":true,
"reasoning":true,
"tool_call":true,
"temperature":false,
"modalities":{
"input":[
"text",
"image"
],
"output":[
"text"
]
},
"cost":{
"input":1.75,
"output":14,
"cache_read":0.175
},
"limit":{
"context":400000,
"output":128000
},
"options":{
"store":false,
"reasoningEffort":"xhigh",
"textVerbosity":"medium",
"reasoningSummary":"auto",
"include":[
"reasoning.encrypted_content"
]
}
}
}
},
"BoHe":{
"npm":"@ai-sdk/openai-compatible",
"name":"BoHe",
"options":{
"baseURL":"https://x666.me/v1",
"apiKey":"sk-IQrzxa9pkDC7igKlVI83AHGDmIdfEe6"
},
"models":{
"gpt-5.2-high":{
"name":"gpt-5.2-high",
"attachment":true,
"reasoning":true,
"tool_call":true,
"temperature":false,
"release_date":"2025-12-11",
"modalities":{
"input":[
"text",
"image"
],
"output":[
"text"
]
},
"cost":{
"input":1.75,
"output":14,
"cache_read":0.175
},
"limit":{
"context":400000,
"output":128000
},
"options":{
"store":false,
"reasoningEffort":"xhigh",
"textVerbosity":"medium",
"reasoningSummary":"auto",
"include":[
"reasoning.encrypted_content"
]
}
},
"gemini-3-pro-preview-bs":{
"name":"gemini-3-pro-preview-bs"
},
"gpt-5.2":{
"name":"gpt-5.2",
"attachment":true,
"reasoning":true,
"tool_call":true,
"temperature":false,
"modalities":{
"input":[
"text",
"image"
],
"output":[
"text"
]
},
"cost":{
"input":1.75,
"output":14,
"cache_read":0.175
},
"limit":{
"context":400000,
"output":128000
},
"options":{
"store":false,
"reasoningEffort":"xhigh",
"textVerbosity":"medium",
"reasoningSummary":"auto",
"include":[
"reasoning.encrypted_content"
]
}
}
}
},
"B4U":{
"npm":"@ai-sdk/openai-compatible",
"name":"B4U",
"options":{
"baseURL":"https://b4u.qzz.io/v1",
"apiKey":"sk-T51gpwsP5bze6fd7lMpcbRzv0wKX0S"
},
"models":{
"claude-4.5-sonnet":{
"name":"claude-4.5-sonnet",
"attachment":true,
"reasoning":true,
"tool_call":true,
"temperature":true,
"modalities":{
"input":[
"text",
"image"
],
"output":[
"text"
]
},
"cost":{
"input":3,
"output":15,
"cache_read":0.3,
"cache_write":3.75
},
"limit":{
"context":200000,
"output":64000
},
"options":{
"thinking":{
"type":"enabled",
"budgetTokens":32000
}
}
},
"claude-4.5-sonnet-think":{
"name":"claude-4.5-sonnet-think",
"attachment":true,
"reasoning":true,
"tool_call":true,
"temperature":true,
"modalities":{
"input":[
"text",
"image"
],
"output":[
"text"
]
},
"cost":{
"input":3,
"output":15,
"cache_read":0.3,
"cache_write":3.75
},
"limit":{
"context":200000,
"output":64000
},
"options":{
"thinking":{
"type":"enabled",
"budgetTokens":32000
}
}
}
}
},
"QAQ":{
"npm":"@ai-sdk/openai-compatible",
"name":"QAQ",
"options":{
"baseURL":"https://ai.qaq.al",
"apiKey":"sk-df87dc1f3a78b272243b198e48b53fc3c0e"
},
"models":{
"gpt-5.2-codex":{
"name":"gpt-5.2-codex",
"attachment":true,
"reasoning":true,
"tool_call":true,
"temperature":false,
"modalities":{
"input":[
"text",
"image"
],
"output":[
"text"
]
},
"cost":{
"input":1.25,
"output":10,
"cache_read":0.125
},
"limit":{
"context":400000,
"output":128000
},
"options":{
"store":false,
"reasoningEffort":"high",
"textVerbosity":"medium",
"reasoningSummary":"auto",
"include":[
"reasoning.encrypted_content"
]
}
},
"gpt-5.2":{
"name":"gpt-5.2",
"attachment":true,
"reasoning":true,
"tool_call":true,
"temperature":false,
"release_date":"2025-12-11",
"modalities":{
"input":[
"text",
"image"
],
"output":[
"text"
]
},
"cost":{
"input":1.75,
"output":14,
"cache_read":0.175
},
"limit":{
"context":400000,
"output":128000
},
"options":{
"store":false,
"reasoningEffort":"xhigh",
"textVerbosity":"medium",
"reasoningSummary":"auto",
"include":[
"reasoning.encrypted_content"
]
}
}
}
}
}
}
先提醒一句:你把好几个 真实 API Key(sk-…) 直接贴在对话里了——这等同于“已泄露”。建议你立刻去对应平台 撤销/轮换这些 Key,然后把配置里的 Key 改成从环境变量读取(或用 opencode 的 auth 存储),不要再明文出现在配置/截图/聊天记录里。
OpenCode 对 Google Gemini 的“思考档位”控制用的是:
thinkingLevel: "low" / "high";includeThoughts: true/false (DeepWiki)。你可以在 models.gemini-3-pro-preview.options 里这样写(示例):
json"gemini-3-pro-preview": { "name": "gemini-3-pro-preview", "attachment": true, "reasoning": true, "tool_call": true, "temperature": true, "modalities": { "input": ["text","image"], "output": ["text"] }, "options": { "thinkingLevel": "high", "includeThoughts": false }, "variants": { "low": { "thinkingLevel": "low", "includeThoughts": false }, "high": { "thinkingLevel": "high", "includeThoughts": false } } }
调用时你还可以用 providerID/modelID:variant 这种格式切档位,比如:
Elysiz/gemini-3-pro-preview:high(variant 语法就是这样设计的)(DeepWiki)
⚠️ 注意:只有当你的“Gemini 真的是按 Google Gemini 适配链路在跑”时,这组
thinkingLevel/includeThoughts才会被正确翻译成 Gemini 的参数。
(你现在用的 @ai-sdk/openai-compatible 更像这类)如果你的后端是 Gemini OpenAI 兼容端点(例如 Google 官方 .../v1beta/openai/,或你的网关完整兼容这个语义),那么控制思考强度应该用 OpenAI 的 reasoning_effort 语义:
minimal/low → low,high → high;medium 不支持,也不能用 "none" 关闭 thinking(Google AI for Developers)。在 opencode 的 model options 里你可以按你现有风格(camelCase)先这样配:
json"gemini-3-pro-preview": { "name": "gemini-3-pro-preview", "options": { "reasoningEffort": "high" }, "variants": { "low": { "reasoningEffort": "low" }, "high": { "reasoningEffort": "high" } } }
include_thoughts(把“thought summaries/签名”等也带出来)Google 的 OpenAI 兼容文档给的方式是通过 extra_body 传:
json"extra_body": { "google": { "thinking_config": { "thinking_level": "low", "include_thoughts": true } } }
并且强调:reasoning_effort 和 thinking_level/thinking_budget 不能同时用(功能重叠会冲突)。(Google AI for Developers)
所以你在 opencode 里要么用 reasoningEffort,要么改用(等价的)extra_body.google.thinking_config;不要两套一起上。
兼容性提醒:
extra_body是否会被你的网关/后端接受,取决于它是不是按 Google 文档实现的 OpenAI-compat。最稳的通常是先只用reasoningEffort=low/high。
extra_body,且不要和 reasoningEffort 混用。(Google AI for Developers)如果你告诉我:你这个 baseURL 背后到底是 Google 官方 v1beta/openai、还是某个多模型网关(例如把 Gemini 转成 OpenAI 协议的那种),我可以把上面那段 options 精确到“应该用 thinkingLevel 还是 reasoningEffort,字段名用 camelCase 还是 snake_case”。
我使用的是“OpenAI-Compatible 网关”,请给个 gemini3 的 model 中 options,如下我写的如何,有不对或改进的请指出,并列出完整示例
"gemini-3-pro-preview":{
"name":"gemini-3-pro-preview",
"options": {
"reasoningEffort": "high"
},
"variants": {
"low": { "reasoningEffort": "low" },
"high": { "reasoningEffort": "high" }
}
},
你这段结构(options + variants)写法本身没问题,唯一需要重点改的是:如果你后端真的是 Gemini 的 OpenAI-Compatible(chat/completions)接口语义,那控制思考强度的请求字段在协议里是 reasoning_effort(下划线),不是 reasoningEffort(驼峰)。Google 的 OpenAI 兼容文档示例用的也是 reasoning_effort,并且明确了 Gemini 3 不支持 medium、也不能关闭 thinking。 (Google AI for Developers)
所以建议你把配置改成下面这样(并顺手加一个 minimal 变体,兼容更多档位):
reasoning_effort)json"gemini-3-pro-preview": { "name": "gemini-3-pro-preview", "options": { "reasoning_effort": "high" }, "variants": { "minimal": { "reasoning_effort": "minimal" }, "low": { "reasoning_effort": "low" }, "high": { "reasoning_effort": "high" } } }
Google 的 OpenAI 兼容接口用的字段是 reasoning_effort,且 Gemini 3 Pro:medium 会报错;none 也不能用于 Gemini 3(不能关 thinking)。(Google AI for Developers)variants 机制就是让你用 provider/model:variant 切档位,你的写法是对的。(OpenCode)Google 文档说可以用 extra_body.google.thinking_config,但注意:它和 reasoning_effort 不能同时用(功能重叠会冲突)。(Google AI for Developers)
你可以额外做一个“带 thoughts”的 variant(不要再放 reasoning_effort):
json"variants": { "high": { "reasoning_effort": "high" }, "high_thoughts": { "extra_body": { "google": { "thinking_config": { "thinking_level": "high", "include_thoughts": true } } } } }
(thinking_level / include_thoughts 这一套就是 Google 在 OpenAI compat 下的扩展字段。)(Google AI for Developers)
下面我用你现有结构写一个“可直接抄”的完整 model entry;注意我把 apiKey 改成环境变量写法,避免明文泄露(OpenCode 文档也推荐这样配)。(open-code.ai)
jsonc{ "$schema": "https://opencode.ai/config.json", "provider": { "Elysiz": { "npm": "@ai-sdk/openai-compatible", "name": "Elysiz", "options": { "baseURL": "https://elysia.h-e.top/v1", "apiKey": "{env:ELYSIZ_API_KEY}" }, "models": { "gemini-3-pro-preview": { "name": "gemini-3-pro-preview", // 下面这些能力开关你可留可不留;不影响 reasoning_effort "attachment": true, "reasoning": true, "tool_call": true, "temperature": true, "modalities": { "input": ["text", "image"], "output": ["text"] }, // 默认:high "options": { "reasoning_effort": "high" }, // 切换:Elysiz/gemini-3-pro-preview:low 之类 "variants": { "minimal": { "reasoning_effort": "minimal" }, "low": { "reasoning_effort": "low" }, "high": { "reasoning_effort": "high" }, // 可选:要 thought summaries 就用这个(不要和 reasoning_effort 同时用) "high_thoughts": { "extra_body": { "google": { "thinking_config": { "thinking_level": "high", "include_thoughts": true } } } } } } } } } }
你之前贴出来的那堆 sk-... 已经算“公开泄露”了:建议立刻去各网关/平台撤销并重建,然后只用 {env:...} 或 /connect 存到 OpenCode 的 auth 文件里(文档提到凭据会存到本地 auth.json)。(OpenCode)
如果你愿意,把你网关实际对外的 OpenAI 路径(是 /v1/chat/completions 还是别的)贴一行(不要带 key),我可以再帮你确认:OpenCode 这边到底是“原样透传字段”还是会做 camelCase→snake_case 映射,从而决定到底用 reasoning_effort 还是两者都要兼容。
我的确使用的是OpenAI-Compatible方式接入中转站的gemini,我怎么才能知道中转站有没有按照google约定的,你提到“Google 的 OpenAI 兼容文档示例用的也是 reasoning_effort,并且明确了 Gemini 3 不支持 medium、也不能关闭 thinking”,请问是否默认就是high,如下配置是不是也可行
"gemini-3-pro-preview-bs":{
"name":"gemini-3-pro-preview-bs",
"options": {
"reasoningEffort": "high"
},
"variants": {
"low": { "reasoningEffort": "low" },
"high": { "reasoningEffort": "high" }
}
},
我要知道的是只要开启了thinking模式,不要降智就行,我要这样
的配置
你现在的目标其实就两点:
不是。 Google 的 OpenAI 兼容文档写得很明确:如果你不传 reasoning_effort,Gemini 会用“该模型的默认 level / budget”。默认到底是啥取决于模型/服务端配置,不保证是 high。并且 Gemini 3 Pro 不支持 medium,也不能关闭 thinking。 (Google AI for Developers)
所以:想“不要降智”,就显式设置 high。
reasoningEffort(驼峰)是否可行?这要看你的中转站有没有做字段映射。
关键点:OpenCode 用的 @ai-sdk/openai-compatible 会“原样”发送参数,不会自动 camelCase→snake_case。OpenCode 官方 PR 里专门提到了这一点(例如 budgetTokens 不会自动变成 budget_tokens)。(GitHub)
而 Google 的 OpenAI 兼容接口示例使用的是 reasoning_effort(下划线)。(Google AI for Developers)
因此:
如果中转站严格按 Google 文档实现,reasoningEffort(驼峰)很可能无效(被忽略)或直接报 unknown field;如果中转站额外做了字段映射,reasoningEffort 也可能“碰巧可用”。用“黑盒探测”最靠谱(不需要看中转站代码):
reasoning_effort = "medium" 应该报错(Gemini 3 Pro 不支持)Google 文档:Gemini 3 Pro 的 medium 是 ERROR。(Google AI for Developers)
bashcurl "$BASE_URL/v1/chat/completions" \ -H "Authorization: Bearer $KEY" \ -H "Content-Type: application/json" \ -d '{ "model":"gemini-3-pro-preview", "reasoning_effort":"medium", "messages":[{"role":"user","content":"1+1=?"}] }'
low vs high 的 token/时延/质量差异同样 prompt 分别发 reasoning_effort:"low" 与 "high";通常 high 会更慢、token 更多、推理更“稳”。(这不是 100% 量化,但能辅助判断。)(Google AI for Developers)
我建议你直接用 snake_case 的 reasoning_effort,并且再给一个 驼峰 fallback variant,这样不需要你纠结中转站到底吃哪个字段——用哪个 variant 能跑就用哪个。
注意:不要在同一个请求里同时带
reasoning_effort和reasoningEffort,有些实现会直接报 unknown/ambiguous。
json"gemini-3-pro-preview-bs": { "name": "gemini-3-pro-preview-bs", "attachment": true, "reasoning": true, "tool_call": true, "temperature": true, "modalities": { "input": ["text", "image"], "output": ["text"] }, "variants": { "high": { "reasoning_effort": "high" }, "low": { "reasoning_effort": "low" }, "high_camel": { "reasoningEffort": "high" }, "low_camel": { "reasoningEffort": "low" } } }
使用时你就指定 variant(OpenCode 的 variants 机制就是这么用的):(OpenCode)
首选:BoHe/gemini-3-pro-preview-bs:high(优先,符合 Google 文档字段);备选:BoHe/gemini-3-pro-preview-bs:high_camel。另外,Google Gemini 3 的文档建议:Gemini 3 尽量保持 temperature 默认值(≈1.0),把 temperature 调到 <1 可能出现循环/性能下降。你现在 temperature:true 代表允许调,但如果你追求稳定,“不手动乱调”就行。(Google AI for Developers)
如果你把中转站的 OpenAI endpoint 路径(比如到底是 /v1/chat/completions 还是别的)和它对未知字段的典型报错样例(去掉 key)贴一段,我还能帮你判断:应该把默认 variant 固定到 high 还是 high_camel 更稳。