Understanding Buffer Overflow in Go
Go is safer than C when it comes to buffer overflows because every slice and array access is bounds-checked at runtime, but memory-related failures can still occur:
```
panic: runtime error: slice bounds out of range
runtime: out of memory
fatal error: runtime: memory allocated exceeds limit
```
These indicate uncontrolled memory growth or slice access violations.
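The first failure mode is Go's bounds checking at work; a minimal sketch that panics instead of silently corrupting memory:
```go
func main() {
    buf := make([]byte, 4)
    n := 10 // e.g., a length taken from untrusted input

    // panic: runtime error: slice bounds out of range [:10] with capacity 4
    _ = buf[:n]
}
```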
Common Scenarios and Solutions
Scenario 1: Unbounded io.ReadAll
Problem code:
```go
func main() {
    resp, _ := http.Get("https://unknown-size-api.com/data")
    defer resp.Body.Close()

    // If the response is 10 GB, this read will exhaust memory
    data, _ := io.ReadAll(resp.Body)
    _ = data
}
```
Solution - Limit reader size:
```go
func main() {
    resp, err := http.Get("https://unknown-size-api.com/data")
    if err != nil {
        log.Fatal(err)
    }
    defer resp.Body.Close()

    // Check Content-Length first
    if resp.ContentLength > 100*1024*1024 { // 100 MB
        log.Fatal("Response too large")
    }

    // Use LimitReader for safety
    maxBytes := int64(10 * 1024 * 1024) // 10 MB max
    limited := io.LimitReader(resp.Body, maxBytes)
    data, err := io.ReadAll(limited)
    if err != nil {
        log.Fatal(err)
    }

    // Check if we got exactly maxBytes (the response may have been cut off)
    if int64(len(data)) == maxBytes {
        log.Println("Warning: response may have been truncated")
    }
}

// Or use a safer wrapper: read up to maxSize+1 bytes so that data of
// exactly maxSize is accepted while anything larger is rejected
func safeReadAll(r io.Reader, maxSize int64) ([]byte, error) {
    limited := io.LimitReader(r, maxSize+1)
    data, err := io.ReadAll(limited)
    if err != nil {
        return nil, err
    }
    if int64(len(data)) > maxSize {
        return nil, fmt.Errorf("data exceeds maximum size of %d bytes", maxSize)
    }
    return data, nil
}
```
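For HTTP servers, the standard library already ships this pattern: http.MaxBytesReader caps a request body so oversized uploads fail cleanly. A minimal handler sketch (the 1 MB cap is an arbitrary choice for illustration):
```go
func handler(w http.ResponseWriter, r *http.Request) {
    // Reads beyond 1 MB fail and the server closes the connection
    r.Body = http.MaxBytesReader(w, r.Body, 1<<20)

    body, err := io.ReadAll(r.Body)
    if err != nil {
        http.Error(w, "request body too large", http.StatusRequestEntityTooLarge)
        return
    }
    _ = body // process the body as usual
}
```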
Scenario 2: Growing Slice Without Limit
Problem code:
```go
func main() {
    data := []byte{}

    // Append without checking size
    for i := 0; i < 1000000000; i++ {
        data = append(data, byte(i)) // Memory grows without bound (append keeps doubling capacity)
    }
}
```
Solution - Pre-allocate with a size limit:
```go
func main() {
    maxSize := 100 * 1024 * 1024 // 100 MB limit
    data := make([]byte, 0, maxSize)

    for i := 0; i < 1000000000; i++ {
        if len(data) >= maxSize {
            log.Fatal("Buffer size limit reached")
        }
        data = append(data, byte(i))
    }
}

// Or use a bounded buffer type
type BoundedBuffer struct {
    data    []byte
    maxSize int
}

func NewBoundedBuffer(maxSize int) *BoundedBuffer {
    return &BoundedBuffer{
        data:    make([]byte, 0, maxSize),
        maxSize: maxSize,
    }
}

// Write implements io.Writer and rejects writes that would exceed maxSize
func (b *BoundedBuffer) Write(p []byte) (int, error) {
    if len(b.data)+len(p) > b.maxSize {
        return 0, fmt.Errorf("buffer overflow: max size %d bytes", b.maxSize)
    }
    b.data = append(b.data, p...)
    return len(p), nil
}

func (b *BoundedBuffer) Bytes() []byte {
    return b.data
}
```
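Because BoundedBuffer implements io.Writer, it can stand in for an unbounded bytes.Buffer; a sketch of using it to cap an HTTP download (readResponse is a hypothetical helper):
```go
func readResponse(resp *http.Response) ([]byte, error) {
    defer resp.Body.Close()
    buf := NewBoundedBuffer(10 * 1024 * 1024) // 10 MB cap

    // io.Copy surfaces the buffer's overflow error as soon as the cap is hit
    if _, err := io.Copy(buf, resp.Body); err != nil {
        return nil, err
    }
    return buf.Bytes(), nil
}
```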
Scenario 3: String Concatenation Memory Blowup
Problem code:
```go
func main() {
    result := ""
    for i := 0; i < 1000000; i++ {
        result += "some string\n" // Creates a new string each iteration
        // Total allocation is ~O(n^2) over n iterations
    }
    _ = result
}
```
Solution - Use strings.Builder:
```go
func main() {
    var builder strings.Builder
    builder.Grow(1000000 * 12) // Pre-allocate the estimated size (12 bytes per line)

    for i := 0; i < 1000000; i++ {
        builder.WriteString("some string\n")

        // Optional: check size as you go
        if builder.Len() > 100*1024*1024 { // 100 MB
            log.Fatal("String too large")
        }
    }

    result := builder.String()
    _ = result
}

// Or a bounded string builder
type BoundedStringBuilder struct {
    builder strings.Builder
    maxSize int
}

func (b *BoundedStringBuilder) WriteString(s string) error {
    if b.builder.Len()+len(s) > b.maxSize {
        return fmt.Errorf("string exceeds max size %d", b.maxSize)
    }
    b.builder.WriteString(s)
    return nil
}

// String returns the accumulated contents
func (b *BoundedStringBuilder) String() string {
    return b.builder.String()
}
```
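A usage sketch of the bounded builder (buildReport and its 1 MB cap are illustrative assumptions):
```go
func buildReport(lines []string) (string, error) {
    b := &BoundedStringBuilder{maxSize: 1 << 20} // 1 MB cap

    for _, line := range lines {
        if err := b.WriteString(line + "\n"); err != nil {
            return "", err // stop as soon as the cap would be exceeded
        }
    }
    return b.String(), nil
}
```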
Scenario 4: Map Growing Without Bound
Problem code:
```go
func cacheAll(incomingData map[string][]byte) {
    cache := make(map[string][]byte)

    // Store unlimited entries
    for key, value := range incomingData {
        cache[key] = value // No size check
    }
    // Memory grows indefinitely
}
```
Solution - Implement a size limit:
```go
type LimitedCache struct {
    data        map[string][]byte
    maxSize     int64
    currentSize int64
}

func NewLimitedCache(maxSize int64) *LimitedCache {
    return &LimitedCache{
        data:    make(map[string][]byte),
        maxSize: maxSize,
    }
}

func (c *LimitedCache) Set(key string, value []byte) error {
    size := int64(len(value))

    // Check if adding would exceed the limit
    if c.currentSize+size > c.maxSize {
        // Evict entries until there is room (map iteration order is
        // random, so this is arbitrary eviction, not LRU)
        for k, v := range c.data {
            c.currentSize -= int64(len(v))
            delete(c.data, k)
            if c.currentSize+size <= c.maxSize {
                break
            }
        }

        if c.currentSize+size > c.maxSize {
            return fmt.Errorf("value too large: %d bytes", size)
        }
    }

    // Subtract the old value's size if the key already exists
    if old, exists := c.data[key]; exists {
        c.currentSize -= int64(len(old))
    }

    c.data[key] = value
    c.currentSize += size
    return nil
}

func (c *LimitedCache) Get(key string) ([]byte, bool) {
    value, ok := c.data[key]
    return value, ok
}
```
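A quick usage sketch showing eviction in action (the 1 KB budget is deliberately tiny for illustration):
```go
func main() {
    cache := NewLimitedCache(1024) // 1 KB total budget

    if err := cache.Set("a", make([]byte, 600)); err != nil {
        log.Fatal(err)
    }
    // Inserting "b" would exceed the budget, so "a" is evicted first
    if err := cache.Set("b", make([]byte, 600)); err != nil {
        log.Fatal(err)
    }

    if _, ok := cache.Get("a"); !ok {
        log.Println(`"a" was evicted to make room`)
    }
}
```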
Scenario 5: JSON Unmarshal Large Payload
Problem code:
```go
func main() {
    data := make([]byte, 1000000000) // 1 GB payload

    var result any
    json.Unmarshal(data, &result) // Could build huge in-memory structures
}
```
Solution - Stream parsing with limits:
```go
func main() {
    file, err := os.Open("large.json")
    if err != nil {
        log.Fatal(err)
    }
    defer file.Close()

    decoder := json.NewDecoder(file)
    decoder.DisallowUnknownFields()

    // Parse incrementally (this loop handles a stream of concatenated JSON values)
    for {
        var item Item
        err := decoder.Decode(&item)
        if err == io.EOF {
            break
        }
        if err != nil {
            log.Fatal(err)
        }

        // Process each item individually
        process(item)
    }
}

// For a single large JSON array, walk it element by element
func parseJSONArray(r io.Reader, handler func(item json.RawMessage) error) error {
    decoder := json.NewDecoder(r)

    // Expect the opening bracket
    if _, err := decoder.Token(); err != nil {
        return err
    }

    // Read array elements one at a time
    for decoder.More() {
        var raw json.RawMessage
        if err := decoder.Decode(&raw); err != nil {
            return err
        }

        // Process raw JSON, optionally with a per-item size check
        if len(raw) > 1024*1024 { // 1 MB per item
            return fmt.Errorf("individual item too large")
        }

        if err := handler(raw); err != nil {
            return err
        }
    }

    // Expect the closing bracket
    _, err := decoder.Token()
    return err
}
```
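The streaming parser pairs naturally with io.LimitReader so both per-item and total input sizes stay bounded; a sketch reusing the large.json file from above:
```go
func main() {
    file, err := os.Open("large.json")
    if err != nil {
        log.Fatal(err)
    }
    defer file.Close()

    // Cap total input as well as each item
    limited := io.LimitReader(file, 1<<30) // 1 GB overall

    err = parseJSONArray(limited, func(item json.RawMessage) error {
        log.Printf("item: %d bytes", len(item))
        return nil
    })
    if err != nil {
        log.Fatal(err)
    }
}
```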
Scenario 6: Copy Without Size Check
Problem code:
```go
func main() {
    src, _ := os.Open("large-file.bin")
    dst, _ := os.Create("copy.bin")

    // io.Copy streams through a small internal buffer, so memory is safe,
    // but it copies until EOF: an unbounded src can fill the disk
    io.Copy(dst, src)
}
```
Solution - Copy with buffer and size limits:
```go
func main() {
    src, _ := os.Open("large-file.bin")
    dst, _ := os.Create("copy.bin")
    defer src.Close()
    defer dst.Close()

    // Use a fixed, reusable buffer (controls per-copy allocation)
    buf := make([]byte, 32*1024) // 32 KB buffer
    _, err := io.CopyBuffer(dst, src, buf)
    if err != nil {
        log.Fatal(err)
    }

    // Or limit the total bytes copied
    maxCopy := int64(100 * 1024 * 1024) // 100 MB max
    n, err := io.CopyN(dst, src, maxCopy)
    if err != nil && err != io.EOF { // io.EOF just means src was smaller than the limit
        log.Fatal(err)
    }
    if n == maxCopy {
        // Check if there was more data
        extra, _ := src.Read(make([]byte, 1))
        if extra > 0 {
            log.Println("File truncated due to size limit")
        }
    }
}
```
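The same idea can be packaged as a reusable helper; copyAtMost below is a hypothetical sketch that reports an error instead of silently truncating:
```go
// copyAtMost copies up to maxBytes from src to dst and returns an
// error if src still has data past the limit
func copyAtMost(dst io.Writer, src io.Reader, maxBytes int64) (int64, error) {
    n, err := io.Copy(dst, io.LimitReader(src, maxBytes))
    if err != nil {
        return n, err
    }
    if n == maxBytes {
        // Peek one byte to see whether the source was truncated
        var one [1]byte
        if m, _ := src.Read(one[:]); m > 0 {
            return n, fmt.Errorf("source exceeds %d-byte limit", maxBytes)
        }
    }
    return n, nil
}
```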
Memory Limit Patterns
Set Memory Limit (Go 1.19+)
```go
func main() {
    // Set a soft memory limit (runtime/debug, Go 1.19+)
    debug.SetMemoryLimit(100 * 1024 * 1024) // 100 MB

    // Make the GC run more aggressively
    debug.SetGCPercent(50) // Run GC when the heap grows by 50%

    // Your code...
}
```
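The limit can also be set without code changes through the GOMEMLIMIT environment variable (for example GOMEMLIMIT=100MiB), and passing a negative value to SetMemoryLimit queries the current limit without changing it:
```go
func main() {
    // A negative input leaves the limit unchanged and returns its current value
    current := debug.SetMemoryLimit(-1)
    log.Printf("current memory limit: %d bytes", current)
}
```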
Monitor Memory Usage
```go
func monitorMemory() {
    var m runtime.MemStats
    for {
        runtime.ReadMemStats(&m)

        log.Printf("Alloc = %v MiB, TotalAlloc = %v MiB, Sys = %v MiB",
            m.Alloc/1024/1024, m.TotalAlloc/1024/1024, m.Sys/1024/1024)

        if m.Alloc > 100*1024*1024 { // 100 MB
            log.Println("Memory usage high, triggering GC")
            runtime.GC()
        }

        time.Sleep(10 * time.Second)
    }
}
```
Graceful Memory Exhaustion Handling
```go
// make never returns nil: if the runtime cannot satisfy an allocation,
// the program aborts with an unrecoverable fatal error. So check a soft
// budget before allocating instead of testing the result for nil.
func allocateWithFallback(size int) ([]byte, error) {
    var m runtime.MemStats
    runtime.ReadMemStats(&m)

    const budget = 500 * 1024 * 1024 // 500 MB soft budget
    if m.Alloc+uint64(size) > budget {
        // Try a smaller size
        halfSize := size / 2
        if halfSize < 1024 { // Minimum 1 KB
            return nil, errors.New("cannot allocate even minimum memory")
        }
        return allocateWithFallback(halfSize)
    }

    return make([]byte, size), nil
}
```
Verification
```go
func TestBoundedBuffer(t *testing.T) {
    buf := NewBoundedBuffer(100)

    // Normal write
    n, err := buf.Write([]byte("hello"))
    if err != nil || n != 5 {
        t.Errorf("expected successful write")
    }

    // Overflow write
    _, err = buf.Write(make([]byte, 200))
    if err == nil {
        t.Error("expected overflow error")
    }
}
```
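A companion test can pin down the boundary behavior of safeReadAll from Scenario 1 (assuming the maxSize+1 variant shown there):
```go
func TestSafeReadAll(t *testing.T) {
    // Exactly at the limit: should succeed
    data, err := safeReadAll(strings.NewReader("12345"), 5)
    if err != nil || string(data) != "12345" {
        t.Errorf("expected full read, got %q (err %v)", data, err)
    }

    // One byte over the limit: should fail
    _, err = safeReadAll(strings.NewReader("123456"), 5)
    if err == nil {
        t.Error("expected size error")
    }
}
```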