package redis

import (
    "context"
    "fmt"
    "net"
    "sync"
    "sync/atomic"
    "testing"
    "time"
)

// TestBadConfigurationHighLoad demonstrates the problem with default configuration
// under high load with slow dials.
func TestBadConfigurationHighLoad(t *testing.T) {
    var dialCount atomic.Int32
    var dialsFailed atomic.Int32
    var dialsSucceeded atomic.Int32

    // Simulate slow network - 300ms per dial (e.g., network latency, TLS handshake)
    slowDialer := func(ctx context.Context, network, addr string) (net.Conn, error) {
        dialCount.Add(1)
        select {
        case <-time.After(300 * time.Millisecond):
            dialsSucceeded.Add(1)
            return &net.TCPConn{}, nil
        case <-ctx.Done():
            dialsFailed.Add(1)
            return nil, ctx.Err()
        }
    }

    // BAD CONFIGURATION: Default settings
    // On an 8-CPU machine:
    // - PoolSize = 10 * 8 = 80
    // - MaxConcurrentDials = 80
    // - MinIdleConns = 0 (no pre-warming)
    opt := &Options{
        Addr:               "localhost:6379",
        Dialer:             slowDialer,
        PoolSize:           80, // Default: 10 * GOMAXPROCS
        MaxConcurrentDials: 80, // Default: same as PoolSize
        MinIdleConns:       0,  // Default: no pre-warming
        DialTimeout:        5 * time.Second,
    }

    client := NewClient(opt)
    defer client.Close()

    // Simulate high load: 200 concurrent requests with 200ms timeout
    // This simulates a burst of traffic (e.g., after a deployment or cache miss)
    const numRequests = 200
    const requestTimeout = 200 * time.Millisecond

    var wg sync.WaitGroup
    var timeouts atomic.Int32
    var successes atomic.Int32
    var errors atomic.Int32

    startTime := time.Now()

    for i := 0; i < numRequests; i++ {
        wg.Add(1)
        go func(id int) {
            defer wg.Done()

            ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
            defer cancel()

            _, err := client.Get(ctx, fmt.Sprintf("key-%d", id)).Result()

            if err != nil {
                if ctx.Err() == context.DeadlineExceeded || err == context.DeadlineExceeded {
                    timeouts.Add(1)
                } else {
                    errors.Add(1)
                }
            } else {
                successes.Add(1)
            }
        }(i)

        // Stagger requests slightly to simulate real traffic
        if i%20 == 0 {
            time.Sleep(5 * time.Millisecond)
        }
    }

    wg.Wait()
    totalTime := time.Since(startTime)

    timeoutRate := float64(timeouts.Load()) / float64(numRequests) * 100
    successRate := float64(successes.Load()) / float64(numRequests) * 100

    t.Logf("\n=== BAD CONFIGURATION (Default Settings) ===")
    t.Logf("Configuration:")
    t.Logf("  PoolSize: %d", opt.PoolSize)
    t.Logf("  MaxConcurrentDials: %d", opt.MaxConcurrentDials)
    t.Logf("  MinIdleConns: %d", opt.MinIdleConns)
    t.Logf("\nResults:")
    t.Logf("  Total time: %v", totalTime)
    t.Logf("  Successes: %d (%.1f%%)", successes.Load(), successRate)
    t.Logf("  Timeouts: %d (%.1f%%)", timeouts.Load(), timeoutRate)
    t.Logf("  Other errors: %d", errors.Load())
    t.Logf("  Total dials: %d (succeeded: %d, failed: %d)",
        dialCount.Load(), dialsSucceeded.Load(), dialsFailed.Load())

    // With the bad configuration:
    // - MaxConcurrentDials=80 means only 80 dials can run concurrently
    // - Each dial takes 300ms, but the request timeout is 200ms
    // - Requests time out while waiting for dial slots
    // - Expected: high timeout rate (>50%)
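    //
    // Back-of-envelope from the numbers above (not measured): 200 requests
    // contending for 80 dial slots need roughly three waves of 300ms dials,
    // so anything past the first wave waits at least 300ms for a slot alone,
    // which is already past the 200ms request deadline.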

    if timeoutRate < 50 {
        t.Logf("WARNING: Expected high timeout rate (>50%%), got %.1f%%. Test may not be stressing the system enough.", timeoutRate)
    }
}

// TestGoodConfigurationHighLoad demonstrates how proper configuration fixes the problem
func TestGoodConfigurationHighLoad(t *testing.T) {
    var dialCount atomic.Int32
    var dialsFailed atomic.Int32
    var dialsSucceeded atomic.Int32

    // Same slow dialer - 300ms per dial
    slowDialer := func(ctx context.Context, network, addr string) (net.Conn, error) {
        dialCount.Add(1)
        select {
        case <-time.After(300 * time.Millisecond):
            dialsSucceeded.Add(1)
            return &net.TCPConn{}, nil
        case <-ctx.Done():
            dialsFailed.Add(1)
            return nil, ctx.Err()
        }
    }

    // GOOD CONFIGURATION: Tuned for high load
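    // Sizing note (judgment call, not a library default): a larger pool and
    // more concurrent dials cost client memory and server-side connection
    // slots, so these values are tuned for this test's burst of 200 requests
    // rather than being a universal recommendation.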
    opt := &Options{
        Addr:               "localhost:6379",
        Dialer:             slowDialer,
        PoolSize:           300, // Increased from 80
        MaxConcurrentDials: 300, // Increased from 80
        MinIdleConns:       50,  // Pre-warm the pool
        DialTimeout:        5 * time.Second,
    }

    client := NewClient(opt)
    defer client.Close()

    // Wait for the pool to warm up: each dial takes ~300ms, so give the
    // MinIdleConns a bit longer than one dial round-trip to become ready.
    time.Sleep(500 * time.Millisecond)

    // Same load: 200 concurrent requests with 200ms timeout
    const numRequests = 200
    const requestTimeout = 200 * time.Millisecond

    var wg sync.WaitGroup
    var timeouts atomic.Int32
    var successes atomic.Int32
    var errors atomic.Int32

    startTime := time.Now()

    for i := 0; i < numRequests; i++ {
        wg.Add(1)
        go func(id int) {
            defer wg.Done()

            ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
            defer cancel()

            _, err := client.Get(ctx, fmt.Sprintf("key-%d", id)).Result()

            if err != nil {
                if ctx.Err() == context.DeadlineExceeded || err == context.DeadlineExceeded {
                    timeouts.Add(1)
                } else {
                    errors.Add(1)
                }
            } else {
                successes.Add(1)
            }
        }(i)

        // Stagger requests slightly
        if i%20 == 0 {
            time.Sleep(5 * time.Millisecond)
        }
    }

    wg.Wait()
    totalTime := time.Since(startTime)

    timeoutRate := float64(timeouts.Load()) / float64(numRequests) * 100
    successRate := float64(successes.Load()) / float64(numRequests) * 100

    t.Logf("\n=== GOOD CONFIGURATION (Tuned Settings) ===")
    t.Logf("Configuration:")
    t.Logf("  PoolSize: %d", opt.PoolSize)
    t.Logf("  MaxConcurrentDials: %d", opt.MaxConcurrentDials)
    t.Logf("  MinIdleConns: %d", opt.MinIdleConns)
    t.Logf("\nResults:")
    t.Logf("  Total time: %v", totalTime)
    t.Logf("  Successes: %d (%.1f%%)", successes.Load(), successRate)
    t.Logf("  Timeouts: %d (%.1f%%)", timeouts.Load(), timeoutRate)
    t.Logf("  Other errors: %d", errors.Load())
    t.Logf("  Total dials: %d (succeeded: %d, failed: %d)",
        dialCount.Load(), dialsSucceeded.Load(), dialsFailed.Load())

    // With good configuration:
    // - Higher MaxConcurrentDials allows more concurrent dials
    // - MinIdleConns pre-warms the pool
    // - Expected: Low timeout rate (<20%)
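    //
    // Back-of-envelope from the test parameters: MaxConcurrentDials=300 exceeds
    // the 200-request burst, so no request has to queue for a dial slot, and the
    // 50 pre-warmed connections absorb the earliest arrivals whose 200ms deadline
    // is shorter than a single 300ms dial.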

    if timeoutRate > 20 {
        t.Errorf("Expected low timeout rate (<20%%), got %.1f%%", timeoutRate)
    }
}

// TestConfigurationComparison runs both tests and shows the difference
func TestConfigurationComparison(t *testing.T) {
    t.Run("BadConfiguration", TestBadConfigurationHighLoad)
    t.Run("GoodConfiguration", TestGoodConfigurationHighLoad)
}
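
// The helper below is an illustrative sketch, not part of the client API: it
// packages the sizing arithmetic the two tests above demonstrate, deriving
// pool settings from an expected burst size and per-dial latency. Its name and
// parameters (burstSize, dialLatency, requestTimeout) are hypothetical inputs
// a caller would have to estimate for their own workload.
func exampleTunedOptions(addr string, burstSize int, dialLatency, requestTimeout time.Duration) *Options {
    // Let every request in a burst dial concurrently so none of them queues
    // behind a dial slot longer than its own deadline.
    poolSize := burstSize + burstSize/2 // headroom above the expected burst
    // If a single dial is slower than the request deadline, pre-warm part of
    // the pool so the earliest requests do not need to dial at all.
    minIdle := 0
    if dialLatency >= requestTimeout {
        minIdle = burstSize / 4
    }
    return &Options{
        Addr:               addr,
        PoolSize:           poolSize,
        MaxConcurrentDials: poolSize,
        MinIdleConns:       minIdle,
        DialTimeout:        5 * time.Second,
    }
}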