all: use Go 1.26 things, run most gofix modernizers

I omitted a lot of the min/max modernizers because they didn't
result in clearer code.

Some of it is the older "for x := range 123" modernization.

Also: errors.AsType, any, fmt.Appendf, etc.

Updates #18682

Change-Id: I83a451577f33877f962766a5b65ce86f7696471c
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
This commit is contained in:
Brad Fitzpatrick
2026-03-05 21:13:57 +00:00
committed by Brad Fitzpatrick
parent 4453cc5f53
commit bd2a2d53d3
168 changed files with 431 additions and 618 deletions
+1 -1
View File
@@ -234,7 +234,7 @@ func ValidHostname(hostname string) error {
return err
}
for _, label := range strings.Split(fqdn.WithoutTrailingDot(), ".") {
for label := range strings.SplitSeq(fqdn.WithoutTrailingDot(), ".") {
if err := ValidLabel(label); err != nil {
return err
}
+1 -1
View File
@@ -52,7 +52,7 @@ func scrubHex(buf []byte) []byte {
in[0] = '?'
return
}
v := []byte(fmt.Sprintf("v%d%%%d", len(saw)+1, u64%8))
v := fmt.Appendf(nil, "v%d%%%d", len(saw)+1, u64%8)
saw[inStr] = v
copy(in, v)
})
+1 -1
View File
@@ -47,7 +47,7 @@ type hasher interface {
func hashSuite(h hasher) {
for i := range 10 {
for j := 0; j < 10; j++ {
for range 10 {
h.HashUint8(0x01)
h.HashUint8(0x23)
h.HashUint32(0x456789ab)
+1 -1
View File
@@ -44,7 +44,7 @@ func ParseRange(hdr string) (ranges []Range, ok bool) {
hdr = strings.Trim(hdr, ows) // per RFC 7230, section 3.2
units, elems, hasUnits := strings.Cut(hdr, "=")
elems = strings.TrimLeft(elems, ","+ows)
for _, elem := range strings.Split(elems, ",") {
for elem := range strings.SplitSeq(elems, ",") {
elem = strings.Trim(elem, ows) // per RFC 7230, section 7
switch {
case strings.HasPrefix(elem, "-"): // i.e., "-" suffix-length
+1 -1
View File
@@ -27,7 +27,7 @@ func TestUsedConsistently(t *testing.T) {
cmd := exec.Command("git", "grep", "-l", "-F", "http.Method")
cmd.Dir = rootDir
matches, _ := cmd.Output()
for _, fn := range strings.Split(strings.TrimSpace(string(matches)), "\n") {
for fn := range strings.SplitSeq(strings.TrimSpace(string(matches)), "\n") {
switch fn {
case "util/httpm/httpm.go", "util/httpm/httpm_test.go":
continue
+3 -4
View File
@@ -9,6 +9,7 @@ import (
"errors"
"fmt"
"os"
"slices"
"strconv"
"strings"
)
@@ -60,10 +61,8 @@ func (n *fakeIPTables) Append(table, chain string, args ...string) error {
func (n *fakeIPTables) Exists(table, chain string, args ...string) (bool, error) {
k := table + "/" + chain
if rules, ok := n.n[k]; ok {
for _, rule := range rules {
if rule == strings.Join(args, " ") {
return true, nil
}
if slices.Contains(rules, strings.Join(args, " ")) {
return true, nil
}
return false, nil
} else {
+2 -2
View File
@@ -21,8 +21,8 @@ import (
func init() {
isNotExistError = func(err error) bool {
var e *iptables.Error
return errors.As(err, &e) && e.IsNotExist()
e, ok := errors.AsType[*iptables.Error](err)
return ok && e.IsNotExist()
}
}
+2 -2
View File
@@ -236,7 +236,7 @@ func portMapRule(t *nftables.Table, ch *nftables.Chain, tun string, targetIP net
// This metadata can then be used to find the rule.
// https://github.com/google/nftables/issues/48
func svcPortMapRuleMeta(svcName string, targetIP netip.Addr, pm PortMap) []byte {
return []byte(fmt.Sprintf("svc:%s,targetIP:%s:matchPort:%v,targetPort:%v,proto:%v", svcName, targetIP.String(), pm.MatchPort, pm.TargetPort, pm.Protocol))
return fmt.Appendf(nil, "svc:%s,targetIP:%s:matchPort:%v,targetPort:%v,proto:%v", svcName, targetIP.String(), pm.MatchPort, pm.TargetPort, pm.Protocol)
}
func (n *nftablesRunner) findRuleByMetadata(t *nftables.Table, ch *nftables.Chain, meta []byte) (*nftables.Rule, error) {
@@ -305,5 +305,5 @@ func protoFromString(s string) (uint8, error) {
// This metadata can then be used to find the rule.
// https://github.com/google/nftables/issues/48
func svcRuleMeta(svcName string, origDst, dst netip.Addr) []byte {
return []byte(fmt.Sprintf("svc:%s,VIP:%s,ClusterIP:%s", svcName, origDst.String(), dst.String()))
return fmt.Appendf(nil, "svc:%s,VIP:%s,ClusterIP:%s", svcName, origDst.String(), dst.String())
}
+1 -1
View File
@@ -1066,7 +1066,7 @@ func checkSNATRule_nft(t *testing.T, runner *nftablesRunner, fam nftables.TableF
if chain == nil {
t.Fatal("POSTROUTING chain does not exist")
}
meta := []byte(fmt.Sprintf("dst:%s,src:%s", dst.String(), src.String()))
meta := fmt.Appendf(nil, "dst:%s,src:%s", dst.String(), src.String())
wantsRule := snatRule(chain.Table, chain, src, dst, meta)
checkRule(t, wantsRule, runner.conn)
}
+3 -3
View File
@@ -94,12 +94,12 @@ func TestPool(t *testing.T) {
func TestTakeRandom(t *testing.T) {
p := Pool[int]{}
for i := 0; i < 10; i++ {
for i := range 10 {
p.Add(i + 100)
}
seen := make(map[int]bool)
for i := 0; i < 10; i++ {
for range 10 {
item, ok := p.TakeRandom()
if !ok {
t.Errorf("unexpected empty pool")
@@ -116,7 +116,7 @@ func TestTakeRandom(t *testing.T) {
t.Errorf("expected empty pool")
}
for i := 0; i < 10; i++ {
for i := range 10 {
want := 100 + i
if !seen[want] {
t.Errorf("item %v not seen", want)
+1 -1
View File
@@ -152,7 +152,7 @@ func (s bitSet) values() iter.Seq[uint64] {
return func(yield func(uint64) bool) {
// Hyrum-proofing: randomly iterate in forwards or reverse.
if rand.Uint64()%2 == 0 {
for i := 0; i < bits.UintSize; i++ {
for i := range bits.UintSize {
if s.contains(uint64(i)) && !yield(uint64(i)) {
return
}
+2 -2
View File
@@ -36,7 +36,7 @@ var errGoexit = errors.New("runtime.Goexit was called")
// A panicError is an arbitrary value recovered from a panic
// with the stack trace during the execution of given function.
type panicError struct {
value interface{}
value any
stack []byte
}
@@ -45,7 +45,7 @@ func (p *panicError) Error() string {
return fmt.Sprintf("%v\n\n%s", p.value, p.stack)
}
func newPanicError(v interface{}) error {
func newPanicError(v any) error {
stack := debug.Stack()
// The first line of the stack trace is of the form "goroutine N [status]:"
+17 -22
View File
@@ -25,7 +25,7 @@ import (
func TestDo(t *testing.T) {
var g Group[string, any]
v, err, _ := g.Do("key", func() (interface{}, error) {
v, err, _ := g.Do("key", func() (any, error) {
return "bar", nil
})
if got, want := fmt.Sprintf("%v (%T)", v, v), "bar (string)"; got != want {
@@ -39,7 +39,7 @@ func TestDo(t *testing.T) {
func TestDoErr(t *testing.T) {
var g Group[string, any]
someErr := errors.New("Some error")
v, err, _ := g.Do("key", func() (interface{}, error) {
v, err, _ := g.Do("key", func() (any, error) {
return nil, someErr
})
if err != someErr {
@@ -55,7 +55,7 @@ func TestDoDupSuppress(t *testing.T) {
var wg1, wg2 sync.WaitGroup
c := make(chan string, 1)
var calls int32
fn := func() (interface{}, error) {
fn := func() (any, error) {
if atomic.AddInt32(&calls, 1) == 1 {
// First invocation.
wg1.Done()
@@ -72,9 +72,7 @@ func TestDoDupSuppress(t *testing.T) {
wg1.Add(1)
for range n {
wg1.Add(1)
wg2.Add(1)
go func() {
defer wg2.Done()
wg2.Go(func() {
wg1.Done()
v, err, _ := g.Do("key", fn)
if err != nil {
@@ -84,7 +82,7 @@ func TestDoDupSuppress(t *testing.T) {
if s, _ := v.(string); s != "bar" {
t.Errorf("Do = %T %v; want %q", v, v, "bar")
}
}()
})
}
wg1.Wait()
// At least one goroutine is in fn now and all of them have at
@@ -108,7 +106,7 @@ func TestForget(t *testing.T) {
)
go func() {
g.Do("key", func() (i interface{}, e error) {
g.Do("key", func() (i any, e error) {
close(firstStarted)
<-unblockFirst
close(firstFinished)
@@ -119,7 +117,7 @@ func TestForget(t *testing.T) {
g.Forget("key")
unblockSecond := make(chan struct{})
secondResult := g.DoChan("key", func() (i interface{}, e error) {
secondResult := g.DoChan("key", func() (i any, e error) {
<-unblockSecond
return 2, nil
})
@@ -127,7 +125,7 @@ func TestForget(t *testing.T) {
close(unblockFirst)
<-firstFinished
thirdResult := g.DoChan("key", func() (i interface{}, e error) {
thirdResult := g.DoChan("key", func() (i any, e error) {
return 3, nil
})
@@ -141,7 +139,7 @@ func TestForget(t *testing.T) {
func TestDoChan(t *testing.T) {
var g Group[string, any]
ch := g.DoChan("key", func() (interface{}, error) {
ch := g.DoChan("key", func() (any, error) {
return "bar", nil
})
@@ -160,7 +158,7 @@ func TestDoChan(t *testing.T) {
// See https://github.com/golang/go/issues/41133
func TestPanicDo(t *testing.T) {
var g Group[string, any]
fn := func() (interface{}, error) {
fn := func() (any, error) {
panic("invalid memory address or nil pointer dereference")
}
@@ -197,7 +195,7 @@ func TestPanicDo(t *testing.T) {
func TestGoexitDo(t *testing.T) {
var g Group[string, any]
fn := func() (interface{}, error) {
fn := func() (any, error) {
runtime.Goexit()
return nil, nil
}
@@ -238,7 +236,7 @@ func TestPanicDoChan(t *testing.T) {
}()
g := new(Group[string, any])
ch := g.DoChan("", func() (interface{}, error) {
ch := g.DoChan("", func() (any, error) {
panic("Panicking in DoChan")
})
<-ch
@@ -283,7 +281,7 @@ func TestPanicDoSharedByDoChan(t *testing.T) {
defer func() {
recover()
}()
g.Do("", func() (interface{}, error) {
g.Do("", func() (any, error) {
close(blocked)
<-unblock
panic("Panicking in Do")
@@ -291,7 +289,7 @@ func TestPanicDoSharedByDoChan(t *testing.T) {
}()
<-blocked
ch := g.DoChan("", func() (interface{}, error) {
ch := g.DoChan("", func() (any, error) {
panic("DoChan unexpectedly executed callback")
})
close(unblock)
@@ -325,8 +323,7 @@ func TestPanicDoSharedByDoChan(t *testing.T) {
func TestDoChanContext(t *testing.T) {
t.Run("Basic", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ctx := t.Context()
var g Group[string, int]
ch := g.DoChanContext(ctx, "key", func(_ context.Context) (int, error) {
@@ -337,8 +334,7 @@ func TestDoChanContext(t *testing.T) {
})
t.Run("DoesNotPropagateValues", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ctx := t.Context()
key := new(int)
const value = "hello world"
@@ -364,8 +360,7 @@ func TestDoChanContext(t *testing.T) {
ctx1, cancel1 := context.WithCancel(context.Background())
defer cancel1()
ctx2, cancel2 := context.WithCancel(context.Background())
defer cancel2()
ctx2 := t.Context()
fn := func(ctx context.Context) (int, error) {
select {
+1 -1
View File
@@ -53,7 +53,7 @@ func TestShuffle(t *testing.T) {
}
var wasShuffled bool
for try := 0; try < 10; try++ {
for range 10 {
shuffled := slices.Clone(sl)
Shuffle(shuffled)
if !reflect.DeepEqual(shuffled, sl) {
+1 -6
View File
@@ -89,12 +89,7 @@ func (pc policyChanges) HasChanged(v pkey.Key) bool {
return ok
}
func (pc policyChanges) HasChangedAnyOf(keys ...pkey.Key) bool {
for _, k := range keys {
if pc.HasChanged(k) {
return true
}
}
return false
return slices.ContainsFunc(keys, pc.HasChanged)
}
const watchersKey = "_policytest_watchers"
+1 -1
View File
@@ -43,7 +43,7 @@ func TestTopK(t *testing.T) {
got []int
want = []int{5, 6, 7, 8, 9}
)
for try := 0; try < 10; try++ {
for range 10 {
topk := NewWithParams[int](5, func(in []byte, val int) []byte {
return binary.LittleEndian.AppendUint64(in, uint64(val))
}, 4, 1000)
+1 -2
View File
@@ -77,6 +77,5 @@ func WrapWithMessage(wrapped error, publicMsg string) error {
// As returns the first vizerror.Error in err's chain.
func As(err error) (e Error, ok bool) {
ok = errors.As(err, &e)
return
return errors.AsType[Error](err)
}
+14 -18
View File
@@ -128,7 +128,7 @@ func BenchmarkEncode(b *testing.B) {
b.Run(bb.name, func(b *testing.B) {
b.ReportAllocs()
b.SetBytes(int64(len(src)))
for range b.N {
for b.Loop() {
dst = AppendEncode(dst[:0], src, bb.opts...)
}
})
@@ -153,7 +153,7 @@ func BenchmarkDecode(b *testing.B) {
b.Run(bb.name, func(b *testing.B) {
b.ReportAllocs()
b.SetBytes(int64(len(src)))
for range b.N {
for b.Loop() {
dst = must.Get(AppendDecode(dst[:0], src, bb.opts...))
}
})
@@ -169,16 +169,14 @@ func BenchmarkEncodeParallel(b *testing.B) {
}
b.Run(coder.name, func(b *testing.B) {
b.ReportAllocs()
for range b.N {
var group sync.WaitGroup
for j := 0; j < numCPU; j++ {
group.Add(1)
go func(j int) {
defer group.Done()
for b.Loop() {
var wg sync.WaitGroup
for j := range numCPU {
wg.Go(func() {
dsts[j] = coder.appendEncode(dsts[j][:0], src)
}(j)
})
}
group.Wait()
wg.Wait()
}
})
}
@@ -194,16 +192,14 @@ func BenchmarkDecodeParallel(b *testing.B) {
}
b.Run(coder.name, func(b *testing.B) {
b.ReportAllocs()
for range b.N {
var group sync.WaitGroup
for j := 0; j < numCPU; j++ {
group.Add(1)
go func(j int) {
defer group.Done()
for b.Loop() {
var wg sync.WaitGroup
for j := range numCPU {
wg.Go(func() {
dsts[j] = must.Get(coder.appendDecode(dsts[j][:0], src))
}(j)
})
}
group.Wait()
wg.Wait()
}
})
}