control/controlbase: deflake, speed up TestConnMemoryOverhead

This had gotten flaky with Go 1.26.

Use synctest + AllocsPerRun to make it fast and deterministic.

Updates #18682

Change-Id: If673d6ecd8c1177f59c1b9c0f3fca42309375dff
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
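
The synctest + AllocsPerRun combination, in isolation: synctest.Test runs its body inside a bubble with a virtual clock, and synctest.Wait blocks until every goroutine started in the bubble is durably blocked, so goroutine startup and teardown happen at deterministic points between allocation measurements. Below is a minimal sketch of the pattern, assuming Go 1.25 or later (where testing/synctest is stable); the package, test name, and channel are illustrative, not from this commit:

	package sketch

	import (
		"testing"
		"testing/synctest"
	)

	// TestAllocsSketch measures the average allocations of one
	// spawn-block-teardown cycle, deterministically: synctest.Wait
	// guarantees the spawned goroutine has reached its blocking
	// point (and, later, has exited) before each run completes.
	func TestAllocsSketch(t *testing.T) {
		synctest.Test(t, func(t *testing.T) {
			allocs := testing.AllocsPerRun(100, func() {
				ch := make(chan int)
				go func() { ch <- 1 }() // blocks: no receiver yet
				synctest.Wait()         // sender is now durably blocked
				<-ch                    // unblock it; goroutine exits
				synctest.Wait()         // bubble is quiet again
			})
			t.Logf("allocs per cycle: %v", allocs)
		})
	}

The commit's test applies the same shape to a client/server Conn pair, asserting a per-pair allocation budget rather than the old wall-clock heap-growth measurement.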
parent bb45b2ebbd
commit 8e3d176f1c
1 changed file, 97 changes: control/controlbase/conn_test.go

@@ -11,12 +11,11 @@ import (
 	"fmt"
 	"io"
 	"net"
-	"runtime"
 	"strings"
 	"sync"
 	"testing"
 	"testing/iotest"
-	"time"
+	"testing/synctest"
 
 	chp "golang.org/x/crypto/chacha20poly1305"
 	"golang.org/x/net/nettest"
@@ -226,79 +225,31 @@ func TestConnStd(t *testing.T) {
 	})
 }
 
-// tests that the idle memory overhead of a Conn blocked in a read is
-// reasonable (under 2K). It was previously over 8KB with two 4KB
-// buffers for rx/tx. This make sure we don't regress. Hopefully it
-// doesn't turn into a flaky test. If so, const max can be adjusted,
-// or it can be deleted or reworked.
+// tests that the memory overhead of a Conn blocked in a read is
+// reasonable. It was previously over 8KB with two 4KB buffers for
+// rx/tx. This makes sure we don't regress.
 func TestConnMemoryOverhead(t *testing.T) {
-	num := 1000
-	if testing.Short() {
-		num = 100
-	}
-	ng0 := runtime.NumGoroutine()
-	runtime.GC()
-	var ms0 runtime.MemStats
-	runtime.ReadMemStats(&ms0)
-
-	var closers []io.Closer
-	closeAll := func() {
-		for _, c := range closers {
-			c.Close()
-		}
-		closers = nil
-	}
-	defer closeAll()
-
-	for range num {
-		client, server := pair(t)
-		closers = append(closers, client, server)
-		go func() {
-			var buf [1]byte
-			client.Read(buf[:])
-		}()
-	}
-
-	t0 := time.Now()
-	deadline := t0.Add(3 * time.Second)
-	var ngo int
-	for time.Now().Before(deadline) {
-		runtime.GC()
-		ngo = runtime.NumGoroutine()
-		if ngo >= num {
-			break
-		}
-		time.Sleep(10 * time.Millisecond)
-	}
-	if ngo < num {
-		t.Fatalf("only %v goroutines; expected %v+", ngo, num)
-	}
-
-	runtime.GC()
-	var ms runtime.MemStats
-	runtime.ReadMemStats(&ms)
-	growthTotal := int64(ms.HeapAlloc) - int64(ms0.HeapAlloc)
-	growthEach := float64(growthTotal) / float64(num)
-	t.Logf("Alloced %v bytes, %.2f B/each", growthTotal, growthEach)
-	const max = 2048
-	if growthEach > max {
-		t.Errorf("allocated more than expected; want max %v bytes/each", max)
-	}
-
-	closeAll()
-
-	// And make sure our goroutines go away too.
-	deadline = time.Now().Add(3 * time.Second)
-	for time.Now().Before(deadline) {
-		ngo = runtime.NumGoroutine()
-		if ngo < ng0+num/10 {
-			break
-		}
-		time.Sleep(10 * time.Millisecond)
-	}
-	if ngo >= ng0+num/10 {
-		t.Errorf("goroutines didn't go back down; started at %v, now %v", ng0, ngo)
-	}
+	synctest.Test(t, func(t *testing.T) {
+		// AllocsPerRun runs the function once for warmup (filling
+		// allocator slab caches, etc.) and then measures over the
+		// remaining runs, returning the average allocation count.
+		allocs := testing.AllocsPerRun(100, func() {
+			client, server := pair(t)
+			go func() {
+				var buf [1]byte
+				client.Read(buf[:])
+			}()
+			synctest.Wait()
+			client.Close()
+			server.Close()
+			synctest.Wait()
+		})
+		t.Logf("allocs per blocked-conn pair: %v", allocs)
+		const max = 400
+		if allocs > max {
+			t.Errorf("allocs per blocked-conn pair = %v, want <= %v", allocs, max)
+		}
+	})
 }
 
 type readSink struct {
