性能优化 #

一、性能优化概述 #

1.1 优化方向 #

text
┌─────────────────────────────────────────────────────────┐
│                    性能优化方向                          │
├─────────────────────────────────────────────────────────┤
│                                                         │
│  应用层优化                                              │
│  ├── 中间件优化                                         │
│  ├── 路由优化                                           │
│  └── 处理函数优化                                       │
│                                                         │
│  网络层优化                                              │
│  ├── 响应压缩                                           │
│  ├── 连接池                                             │
│  └── Keep-Alive                                         │
│                                                         │
│  数据层优化                                              │
│  ├── 数据库连接池                                       │
│  ├── 缓存策略                                           │
│  └── 查询优化                                           │
│                                                         │
└─────────────────────────────────────────────────────────┘

二、中间件优化 #

2.1 中间件顺序 #

go
// Create the Echo instance and register global middleware.
// Registration order matters: requests pass through the chain in the
// order added here, so RequestID comes first (later middleware can see
// the generated ID) and Recover is installed before anything that
// might panic downstream.
e := echo.New()

e.Use(middleware.RequestID()) // assign a unique ID to every request
e.Use(middleware.Recover())   // convert downstream panics into 500s
e.Use(middleware.Logger())    // request logging (can log the request ID)
e.Use(middleware.Gzip())      // compress response bodies

2.2 跳过不必要的中间件 #

go
e.Use(middleware.LoggerWithConfig(middleware.LoggerConfig{
    Skipper: func(c echo.Context) bool {
        return c.Path() == "/health" || c.Path() == "/metrics"
    },
}))

2.3 条件性中间件 #

go
// Apply authMiddleware only to routes under /api; every other path
// bypasses authentication and goes straight to the next handler.
e.Use(func(next echo.HandlerFunc) echo.HandlerFunc {
	return func(c echo.Context) error {
		if !strings.HasPrefix(c.Path(), "/api") {
			return next(c)
		}
		return authMiddleware(next)(c)
	}
})

三、响应优化 #

3.1 Gzip压缩 #

go
// Enable gzip response compression. Level 5 trades a little
// compression ratio for lower CPU cost (valid levels are 1-9;
// higher compresses more but burns more CPU per response).
e.Use(middleware.GzipWithConfig(middleware.GzipConfig{
    Level: 5,
}))

3.2 响应缓存 #

go
// cacheMiddleware serves GET responses from Redis when a cached copy
// exists, keyed by request path; misses fall through to the handler.
//
// NOTE(review): nothing ever writes to the cache key, so ttl is unused
// and every request is a MISS — populating the cache requires buffering
// the response body (a wrapping ResponseWriter), which this snippet
// leaves out. NOTE(review): the key ignores the query string, so once
// populated, /users?page=1 and /users?page=2 would share one entry —
// confirm that is intended.
func cacheMiddleware(ttl time.Duration) echo.MiddlewareFunc {
	return func(next echo.HandlerFunc) echo.HandlerFunc {
		return func(c echo.Context) error {
			// Only GET requests are cacheable.
			if c.Request().Method != http.MethodGet {
				return next(c)
			}

			key := "cache:" + c.Request().URL.Path

			// Use the request's context so the Redis lookup is
			// canceled when the client disconnects.
			cached, err := rdb.Get(c.Request().Context(), key).Result()
			if err == nil {
				c.Response().Header().Set("X-Cache", "HIT")
				// The cached value is already a JSON document; write it
				// verbatim. (c.JSON would re-encode the Go string,
				// producing a quoted/escaped JSON string instead of the
				// original document.)
				return c.JSONBlob(http.StatusOK, []byte(cached))
			}

			err = next(c)
			if err == nil {
				c.Response().Header().Set("X-Cache", "MISS")
			}

			return err
		}
	}
}

3.3 ETag支持 #

go
// etagMiddleware computes an MD5-based ETag from the response body and
// answers 304 Not Modified when it matches the client's If-None-Match.
//
// NOTE(review): this only works if an upstream middleware has replaced
// c.Response().Writer with a *responseWriter that buffers the body and
// defers the real write — the type assertion below panics otherwise,
// and if the body was already flushed the ETag/304 arrive too late.
// Confirm the responseWriter integration before relying on this.
func etagMiddleware(next echo.HandlerFunc) echo.HandlerFunc {
    return func(c echo.Context) error {
        // Run the handler first so the (buffered) body is available.
        err := next(c)
        if err != nil {
            return err
        }

        // Requires the custom buffering writer installed elsewhere.
        body := c.Response().Writer.(*responseWriter).body
        etag := fmt.Sprintf(`"%x"`, md5.Sum(body))

        c.Response().Header().Set("ETag", etag)

        // Client already holds this exact representation: skip the body.
        if c.Request().Header.Get("If-None-Match") == etag {
            return c.NoContent(http.StatusNotModified)
        }

        return nil
    }
}

四、数据库优化 #

4.1 连接池配置 #

go
// Obtain the underlying *sql.DB from GORM to tune its connection pool.
sqlDB, err := db.DB()
if err != nil {
    panic(err)
}

// Cap concurrent connections to protect the database server.
sqlDB.SetMaxOpenConns(25)
// Keep a few warm idle connections for reuse.
sqlDB.SetMaxIdleConns(5)
// Recycle connections periodically (helps with server-side timeouts
// and load-balancer failover).
sqlDB.SetConnMaxLifetime(5 * time.Minute)
// NOTE(review): the idle timeout (10m) exceeds the max lifetime (5m),
// so connections are always retired by lifetime first and this setting
// never takes effect — confirm the intended values.
sqlDB.SetConnMaxIdleTime(10 * time.Minute)

4.2 查询优化 #

go
// getUsersOptimized returns one page of users plus the total row count.
// The COUNT and the page query run concurrently; a WaitGroup guarantees
// the count has finished before total is read. (The original fired the
// count in an unsynchronized goroutine — a data race on total that
// usually returned 0.)
func getUsersOptimized(page, size int) ([]User, int64) {
	var users []User
	var total int64

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		db.Model(&User{}).Count(&total)
	}()

	// Fetch only the columns needed for the listing view.
	offset := (page - 1) * size
	db.Select("id, name, email").Offset(offset).Limit(size).Find(&users)

	// Wait for the concurrent count before returning total.
	wg.Wait()
	return users, total
}

4.3 批量操作 #

go
// batchCreateUsers inserts users in chunks of 100 rows per INSERT
// statement, cutting round-trips versus row-by-row creation.
func batchCreateUsers(users []User) error {
    return db.CreateInBatches(users, 100).Error
}

五、缓存策略 #

5.1 Redis缓存 #

go
// GetUserWithCache looks a user up in Redis first and falls back to the
// database on a miss, write-through caching the result for ten minutes.
func GetUserWithCache(ctx context.Context, id string) (*User, error) {
	key := "user:" + id

	// Fast path: cached JSON document.
	cached, err := rdb.Get(ctx, key).Result()
	if err == nil {
		var user User
		// A corrupt cache entry must not be returned as a zero-value
		// user (the original ignored this error) — fall through to the
		// database instead.
		if err := json.Unmarshal([]byte(cached), &user); err == nil {
			return &user, nil
		}
	}

	// Slow path: authoritative read from the database.
	user, err := getUserFromDB(id)
	if err != nil {
		return nil, err
	}

	// Best-effort cache fill; skip on marshal failure rather than
	// storing garbage under the key.
	if data, err := json.Marshal(user); err == nil {
		rdb.Set(ctx, key, data, 10*time.Minute)
	}

	return user, nil
}

5.2 本地缓存 #

go
// LocalCache is a minimal in-process key/value cache guarded by an
// RWMutex. The zero value is usable: Set lazily allocates the map.
// (The original had no constructor anywhere, so Set on a zero-value
// LocalCache panicked with "assignment to entry in nil map".)
type LocalCache struct {
    data map[string]interface{}
    mu   sync.RWMutex
}

// Get returns the value stored under key and whether it was present.
func (c *LocalCache) Get(key string) (interface{}, bool) {
    c.mu.RLock()
    defer c.mu.RUnlock()

    val, ok := c.data[key]
    return val, ok
}

// Set stores value under key, initializing the map on first use so a
// zero-value LocalCache works without a constructor.
func (c *LocalCache) Set(key string, value interface{}) {
    c.mu.Lock()
    defer c.mu.Unlock()

    if c.data == nil {
        c.data = make(map[string]interface{})
    }
    c.data[key] = value
}

六、并发处理 #

6.1 Goroutine池 #

go
// Pool runs submitted tasks on a fixed set of worker goroutines that
// drain a shared queue.
type Pool struct {
    tasks chan func()
}

// NewPool starts size workers sharing one task queue (buffer capacity
// 100) and returns the pool.
func NewPool(size int) *Pool {
    pool := &Pool{tasks: make(chan func(), 100)}
    for n := 0; n < size; n++ {
        go pool.worker()
    }
    return pool
}

// worker executes queued tasks until the channel is closed.
func (p *Pool) worker() {
    for job := range p.tasks {
        job()
    }
}

// Submit enqueues task for execution; it blocks when the queue is full.
func (p *Pool) Submit(task func()) {
    p.tasks <- task
}

6.2 并发请求 #

go
// fetchMultiple retrieves every URL concurrently and returns the
// bodies in the same order as the input slice. Each goroutine writes
// only its own slot, so no locking is required.
func fetchMultiple(urls []string) []string {
    results := make([]string, len(urls))

    var wg sync.WaitGroup
    wg.Add(len(urls))
    for idx, target := range urls {
        // Pass loop values as arguments so each goroutine sees its own
        // copy (required before Go 1.22, harmless after).
        go func(slot int, u string) {
            defer wg.Done()
            results[slot] = fetchURL(u)
        }(idx, target)
    }

    wg.Wait()
    return results
}

七、性能监控 #

7.1 pprof集成 #

go
import (
    "net/http/pprof"
    "github.com/labstack/echo/v4"
)

func main() {
    e := echo.New()
    
    e.GET("/debug/pprof/*", echo.WrapHandler(http.DefaultServeMux))
    
    e.Start(":8080")
}

7.2 自定义指标 #

go
// Metrics holds request counters that the metrics middleware updates
// with sync/atomic operations.
type Metrics struct {
    RequestCount   int64
    RequestLatency int64
    ErrorCount     int64
}

// metricsMiddleware records, for every request, the request count,
// cumulative latency (nanoseconds), and error count into metrics
// using atomic adds so concurrent requests do not race.
func metricsMiddleware(metrics *Metrics) echo.MiddlewareFunc {
    return func(next echo.HandlerFunc) echo.HandlerFunc {
        return func(c echo.Context) error {
            began := time.Now()
            err := next(c)

            elapsed := time.Since(began)
            atomic.AddInt64(&metrics.RequestCount, 1)
            atomic.AddInt64(&metrics.RequestLatency, int64(elapsed))
            if err != nil {
                atomic.AddInt64(&metrics.ErrorCount, 1)
            }

            return err
        }
    }
}

7.3 健康检查 #

go
// Liveness probe: always 200 while the process is running.
e.GET("/health", func(c echo.Context) error {
    return c.JSON(http.StatusOK, map[string]string{
        "status": "healthy",
    })
})

// Readiness probe: 503 until dependencies (here the database) are
// reachable, so load balancers stop routing traffic during outages.
// NOTE(review): db.Ping() implies db is a *sql.DB in this snippet; a
// *gorm.DB would need db.DB() first — confirm which handle is meant.
e.GET("/ready", func(c echo.Context) error {
    if err := db.Ping(); err != nil {
        return c.JSON(http.StatusServiceUnavailable, map[string]string{
            "status": "not ready",
        })
    }

    return c.JSON(http.StatusOK, map[string]string{
        "status": "ready",
    })
})

八、完整示例 #

go
package main

import (
    "context"
    "encoding/json"
    "fmt"
    "net/http"
    "sync"
    "sync/atomic"
    "time"

    "github.com/labstack/echo/v4"
    "github.com/labstack/echo/v4/middleware"
    "gorm.io/driver/mysql"
    "gorm.io/gorm"
)

// User is the persisted user row; GORM uses ID as the primary key and
// the json tags shape API responses.
type User struct {
    ID        uint      `gorm:"primaryKey" json:"id"`
    Name      string    `json:"name"`
    Email     string    `json:"email"`
    CreatedAt time.Time `json:"created_at"`
}

// Metrics aggregates request counters. Fields are written with
// sync/atomic in the middleware, so they must only be read via
// atomic.LoadInt64.
type Metrics struct {
    RequestCount   int64 // total requests served
    RequestLatency int64 // cumulative latency in nanoseconds
    ErrorCount     int64 // requests whose handler returned an error
}

// Cache is an in-process TTL cache guarded by an RWMutex. Expired
// entries are ignored by Get and removed by the cleanup sweeper.
type Cache struct {
    data map[string]cacheItem
    mu   sync.RWMutex
}

// cacheItem pairs a stored value with its absolute expiry time.
type cacheItem struct {
    value     interface{}
    expiresAt time.Time
}

// Package-level singletons, initialized in main/initDB before the
// server starts handling requests.
var (
    db      *gorm.DB
    cache   *Cache
    metrics *Metrics
)

// initDB opens the MySQL connection via GORM and tunes the underlying
// sql.DB connection pool. It returns any open or pool-access error.
func initDB() error {
	dsn := "root:password@tcp(localhost:3306)/myapp?charset=utf8mb4&parseTime=True&loc=Local"

	var err error
	db, err = gorm.Open(mysql.Open(dsn), &gorm.Config{})
	if err != nil {
		return err
	}

	// Propagate the error instead of discarding it (the original used
	// `sqlDB, _ :=`; a nil sqlDB would panic on the Set* calls below).
	sqlDB, err := db.DB()
	if err != nil {
		return err
	}
	sqlDB.SetMaxOpenConns(25)                 // hard cap on concurrent connections
	sqlDB.SetMaxIdleConns(5)                  // warm connections kept for reuse
	sqlDB.SetConnMaxLifetime(5 * time.Minute) // recycle to survive server-side timeouts

	return nil
}

// NewCache builds an empty Cache and launches its background expiry
// sweeper goroutine.
func NewCache() *Cache {
    created := &Cache{data: map[string]cacheItem{}}
    go created.cleanup()
    return created
}

// Get returns the live value stored under key; missing or expired
// entries report (nil, false).
func (c *Cache) Get(key string) (interface{}, bool) {
    c.mu.RLock()
    defer c.mu.RUnlock()

    entry, found := c.data[key]
    if found && !time.Now().After(entry.expiresAt) {
        return entry.value, true
    }
    return nil, false
}

// Set stores value under key with an absolute expiry of ttl from now,
// replacing any existing entry.
func (c *Cache) Set(key string, value interface{}, ttl time.Duration) {
    deadline := time.Now().Add(ttl)

    c.mu.Lock()
    c.data[key] = cacheItem{value: value, expiresAt: deadline}
    c.mu.Unlock()
}

// cleanup sweeps expired entries once per minute for the lifetime of
// the process.
// NOTE(review): the ticker is never stopped and there is no shutdown
// signal, so this goroutine runs forever — acceptable for a process
// singleton, but add a done channel if Caches are ever short-lived.
func (c *Cache) cleanup() {
    ticker := time.NewTicker(time.Minute)
    for range ticker.C {
        c.mu.Lock()
        now := time.Now()
        for k, v := range c.data {
            if now.After(v.expiresAt) {
                delete(c.data, k)
            }
        }
        c.mu.Unlock()
    }
}

// main wires up the database, the local cache, metrics, middleware,
// and routes, then serves HTTP on :8080.
func main() {
    if err := initDB(); err != nil {
        panic(err)
    }

    cache = NewCache()
    metrics = &Metrics{}

    e := echo.New()

    // Middleware order matters: Recover is outermost so later panics
    // are caught; metrics and logging then observe every request.
    e.Use(middleware.Recover())
    e.Use(middleware.GzipWithConfig(middleware.GzipConfig{Level: 5}))
    e.Use(metricsMiddleware(metrics))
    // Skip log lines for the high-frequency health probe.
    e.Use(middleware.LoggerWithConfig(middleware.LoggerConfig{
        Skipper: func(c echo.Context) bool {
            return c.Path() == "/health"
        },
    }))

    e.GET("/health", health)
    e.GET("/metrics", getMetrics)
    e.GET("/users/:id", getUser)
    e.GET("/users", listUsers)

    e.Logger.Fatal(e.Start(":8080"))
}

// metricsMiddleware atomically records the request count, cumulative
// latency (nanoseconds), and error count into m for every request.
func metricsMiddleware(m *Metrics) echo.MiddlewareFunc {
    return func(next echo.HandlerFunc) echo.HandlerFunc {
        return func(c echo.Context) error {
            began := time.Now()
            err := next(c)

            atomic.AddInt64(&m.RequestCount, 1)
            atomic.AddInt64(&m.RequestLatency, int64(time.Since(began)))
            if err != nil {
                atomic.AddInt64(&m.ErrorCount, 1)
            }
            return err
        }
    }
}

// health is the liveness probe handler; it always reports healthy.
func health(c echo.Context) error {
    status := map[string]string{"status": "healthy"}
    return c.JSON(http.StatusOK, status)
}

// getMetrics exposes the current counters as JSON, reading each with
// an atomic load to pair with the middleware's atomic writes.
func getMetrics(c echo.Context) error {
    snapshot := map[string]int64{
        "request_count":   atomic.LoadInt64(&metrics.RequestCount),
        "request_latency": atomic.LoadInt64(&metrics.RequestLatency),
        "error_count":     atomic.LoadInt64(&metrics.ErrorCount),
    }
    return c.JSON(http.StatusOK, snapshot)
}

// getUser returns one user by path id, serving from the local TTL
// cache when possible and caching database hits for ten minutes.
func getUser(c echo.Context) error {
    id := c.Param("id")
    key := "user:" + id

    // Cache hit: return the stored snapshot directly.
    if hit, ok := cache.Get(key); ok {
        return c.JSON(http.StatusOK, hit)
    }

    // Cache miss: load from the database.
    // NOTE(review): every DB error (including connectivity failures) is
    // reported as 404 here — consider distinguishing
    // gorm.ErrRecordNotFound from other errors.
    var user User
    if err := db.First(&user, id).Error; err != nil {
        return echo.NewHTTPError(http.StatusNotFound, "用户不存在")
    }

    cache.Set(key, user, 10*time.Minute)

    return c.JSON(http.StatusOK, user)
}

// listUsers returns one page of users (id, name, email only), cached
// per (page, size) combination for five minutes.
//
// Pagination comes from the optional ?page= and ?size= query
// parameters, defaulting to page 1 with 10 rows — the original
// hard-coded both, which made the parameterized cache key constant and
// the endpoint impossible to page through. Defaults are unchanged, so
// existing callers see identical behavior.
func listUsers(c echo.Context) error {
	page, size := 1, 10
	// fmt.Sscanf keeps the import set minimal here (strconv.Atoi would
	// be the more idiomatic choice). Invalid or non-positive values
	// fall back to the defaults.
	if q := c.QueryParam("page"); q != "" {
		var n int
		if _, err := fmt.Sscanf(q, "%d", &n); err == nil && n > 0 {
			page = n
		}
	}
	if q := c.QueryParam("size"); q != "" {
		var n int
		if _, err := fmt.Sscanf(q, "%d", &n); err == nil && n > 0 {
			size = n
		}
	}

	key := fmt.Sprintf("users:%d:%d", page, size)

	// Serve the cached page when present.
	if hit, ok := cache.Get(key); ok {
		return c.JSON(http.StatusOK, hit)
	}

	// Query only the listed columns for the requested page.
	var users []User
	offset := (page - 1) * size
	db.Select("id, name, email").Offset(offset).Limit(size).Find(&users)

	cache.Set(key, users, 5*time.Minute)

	return c.JSON(http.StatusOK, users)
}

九、总结 #

性能优化要点:

| 方向 | 优化方法 |
| --- | --- |
| 中间件 | 合理顺序、跳过不必要 |
| 响应 | Gzip压缩、ETag、缓存 |
| 数据库 | 连接池、查询优化、批量操作 |
| 缓存 | Redis、本地缓存 |
| 并发 | Goroutine池、并发请求 |
| 监控 | pprof、自定义指标 |

准备好学习实战案例了吗?让我们进入下一章!

最后更新:2026-03-28