Fix critical issues: panic risk and body size limits

- Add type assertion check for session ID context value to prevent panic
- Add 10MB request body size limit to prevent memory exhaustion
- Return 413 (Request Entity Too Large) for oversized requests
- Add maxBodySize constant for easy adjustment
This commit is contained in:
Franz Kafka 2026-04-15 09:11:50 +00:00
parent 1c3eccce10
commit 1c2f59c53e
2 changed files with 15 additions and 2 deletions

View file

@@ -22,6 +22,9 @@ var config *Config
// httpClient is a shared HTTP client for all upstream requests
var httpClient = &http.Client{Timeout: 300 * time.Second}
// maxBodySize limits request body size to prevent memory exhaustion
const maxBodySize = 10 << 20 // 10MB
// blockedHeaders are headers that should never be forwarded to upstream
// for security/privacy reasons. These headers could leak internal URLs,
// session information, or other sensitive data.
@@ -81,9 +84,15 @@ func handleChatCompletions(w http.ResponseWriter, r *http.Request) {
}
apiKey := strings.TrimPrefix(authHeader, "Bearer ")
// Read body
// Read body (with size limit to prevent memory exhaustion)
r.Body = http.MaxBytesReader(w, r.Body, maxBodySize)
body, err := io.ReadAll(r.Body)
if err != nil {
// Check if this is a max bytes error
if err.Error() == "http: request body too large" {
writeError(w, http.StatusRequestEntityTooLarge, "Request body exceeds maximum size limit (10MB)", "invalid_request_error", "body_too_large")
return
}
writeError(w, http.StatusBadRequest, "Failed to read request body", "invalid_request_error", "body_read_error")
return
}
@@ -96,7 +105,11 @@ func handleChatCompletions(w http.ResponseWriter, r *http.Request) {
}
// Get session ID from context (set by main)
sessionID := r.Context().Value(sessionIDKey).(string)
sessionID, ok := r.Context().Value(sessionIDKey).(string)
if !ok {
writeError(w, http.StatusInternalServerError, "Internal server error", "internal_error", "session_missing")
return
}
// Convert to Anthropic format — always non-streaming to upstream
// (ZAI's streaming returns empty for GLM models)

BIN
proxx

Binary file not shown.