Add temperature override option in config

- Add temperature field to Config struct (optional override)
- Pass tempOverride to ConvertOpenAIRequest
- Use override temperature if set, otherwise use client's temperature
- Document option in config.yaml with example
- Update README with temperature override documentation
This commit is contained in:
Franz Kafka 2026-04-15 09:29:54 +00:00
parent cea246da83
commit 29292addac
5 changed files with 19 additions and 8 deletions

View file

@@ -28,6 +28,7 @@ upstream_url: "https://api.z.ai/api/anthropic"
- `port`: Port to listen on (default: 8080) - `port`: Port to listen on (default: 8080)
- `upstream_url`: Base URL for the Anthropic-compatible upstream API - `upstream_url`: Base URL for the Anthropic-compatible upstream API
- `temperature` (optional): Override temperature for all requests. If set, this value is used instead of client-specified temperatures. Remove this line to respect client temperatures.
## Building ## Building

View file

@@ -1,6 +1,11 @@
port: 8080 port: 8080
upstream_url: "https://api.z.ai/api/anthropic" upstream_url: "https://api.z.ai/api/anthropic"
# Temperature override for all requests (optional)
# If set, this temperature will be used instead of what clients request
# Remove this line or set to null to use client-specified temperatures
# temperature: 0.7
models: models:
- id: "glm-4.7" - id: "glm-4.7"
owned_by: "zhipu" owned_by: "zhipu"

View file

@@ -7,7 +7,8 @@ import (
) )
// ConvertOpenAIRequest converts an OpenAI ChatCompletionRequest to Anthropic format // ConvertOpenAIRequest converts an OpenAI ChatCompletionRequest to Anthropic format
func ConvertOpenAIRequest(req *ChatCompletionRequest) *AnthropicRequest { // tempOverride, if provided, overrides any temperature from the request
func ConvertOpenAIRequest(req *ChatCompletionRequest, tempOverride *float64) *AnthropicRequest {
system, remainingMessages := extractSystemMessage(req.Messages) system, remainingMessages := extractSystemMessage(req.Messages)
anthropicReq := &AnthropicRequest{ anthropicReq := &AnthropicRequest{
@@ -27,7 +28,10 @@ func ConvertOpenAIRequest(req *ChatCompletionRequest) *AnthropicRequest {
if req.Stream != nil { if req.Stream != nil {
anthropicReq.Stream = *req.Stream anthropicReq.Stream = *req.Stream
} }
if req.Temperature != nil { // Use temperature override if configured, otherwise use request temperature
if tempOverride != nil {
anthropicReq.Temperature = tempOverride
} else if req.Temperature != nil {
anthropicReq.Temperature = req.Temperature anthropicReq.Temperature = req.Temperature
} }
if req.TopP != nil { if req.TopP != nil {

View file

@@ -12,15 +12,16 @@ import (
) )
// ModelConfig describes one model entry from the `models` list in config.yaml.
type ModelConfig struct {
	ID      string `yaml:"id"`       // model identifier (e.g. "glm-4.7")
	OwnedBy string `yaml:"owned_by"` // owner label (e.g. "zhipu")
}

// Config holds the application configuration loaded from config.yaml.
type Config struct {
	Port        int           `yaml:"port"`         // port to listen on (default: 8080)
	UpstreamURL string        `yaml:"upstream_url"` // base URL for the Anthropic-compatible upstream API
	Models      []ModelConfig `yaml:"models"`       // configured models
	// Temperature, if set, overrides any client-specified temperature on
	// all requests; nil means client temperatures are respected.
	Temperature *float64 `yaml:"temperature,omitempty"`
}
var config *Config var config *Config
@@ -124,7 +125,7 @@ func handleChatCompletions(w http.ResponseWriter, r *http.Request) {
// Convert to Anthropic format — always non-streaming to upstream // Convert to Anthropic format — always non-streaming to upstream
// (ZAI's streaming returns empty for GLM models) // (ZAI's streaming returns empty for GLM models)
anthropicReq := ConvertOpenAIRequest(&req) anthropicReq := ConvertOpenAIRequest(&req, config.Temperature)
anthropicReq.Stream = false anthropicReq.Stream = false
reqBody, _ := json.Marshal(anthropicReq) reqBody, _ := json.Marshal(anthropicReq)

BIN
proxx

Binary file not shown.