appc,feature/conn25: conn25: send address assignments to connector

After we intercept a DNS response and assign magic and transit addresses,
we must communicate the assignment to our connector so that it can
direct traffic when it arrives.

Use the recently added peerapi endpoint to send the addresses.

Updates tailscale/corp#34258
Signed-off-by: Fran Bull <fran@tailscale.com>
main
Fran Bull 2 months ago committed by franbull
parent 6a19995f13
commit a4614d7d17
  1. 50
      appc/conn25.go
  2. 156
      appc/conn25_test.go
  3. 2
      cmd/k8s-operator/depaware.txt
  4. 2
      cmd/tailscaled/depaware-min.txt
  5. 2
      cmd/tailscaled/depaware-minbox.txt
  6. 2
      cmd/tailscaled/depaware.txt
  7. 2
      cmd/tsidp/depaware.txt
  8. 167
      feature/conn25/conn25.go
  9. 138
      feature/conn25/conn25_test.go
  10. 2
      tsnet/depaware.txt

@ -7,6 +7,7 @@ import (
"cmp"
"slices"
"tailscale.com/ipn/ipnext"
"tailscale.com/tailcfg"
"tailscale.com/types/appctype"
"tailscale.com/util/mak"
@ -15,6 +16,43 @@ import (
const AppConnectorsExperimentalAttrName = "tailscale.com/app-connectors-experimental"
// isEligibleConnector reports whether peer is a valid node whose Hostinfo
// advertises it as an app connector.
func isEligibleConnector(peer tailcfg.NodeView) bool {
	if !peer.Valid() {
		return false
	}
	hi := peer.Hostinfo()
	if !hi.Valid() {
		return false
	}
	advertised, _ := hi.AppConnector().Get()
	return advertised
}
// sortByPreference orders ns into the order callers should prefer when
// choosing a connector.
func sortByPreference(ns []tailcfg.NodeView) {
	// The ordering is semantic: callers use the first node they can get a
	// peer api url for. We don't (currently 2026-02-27) have any preference
	// over which node is chosen as long as it's consistent, so sorting by
	// node ID gives a stable, deterministic order. In the future we
	// anticipate integrating with traffic steering.
	slices.SortFunc(ns, func(x, y tailcfg.NodeView) int {
		return cmp.Compare(x.ID(), y.ID())
	})
}
// PickConnector returns peers the backend knows about that match the app, in
// order of preference to use as a connector.
func PickConnector(nb ipnext.NodeBackend, app appctype.Conn25Attr) []tailcfg.NodeView {
	wantTags := set.SetOf(app.Connectors)
	// hasWantedTag reports whether any of n's tags is one of the app's
	// connector tags.
	hasWantedTag := func(n tailcfg.NodeView) bool {
		for _, tag := range n.Tags().All() {
			if wantTags.Contains(tag) {
				return true
			}
		}
		return false
	}
	picked := nb.AppendMatchingPeers(nil, func(n tailcfg.NodeView) bool {
		return isEligibleConnector(n) && hasWantedTag(n)
	})
	sortByPreference(picked)
	return picked
}
// PickSplitDNSPeers looks at the netmap peers capabilities and finds which peers
// want to be connectors for which domains.
func PickSplitDNSPeers(hasCap func(c tailcfg.NodeCapability) bool, self tailcfg.NodeView, peers map[tailcfg.NodeID]tailcfg.NodeView) map[string][]tailcfg.NodeView {
@ -36,10 +74,7 @@ func PickSplitDNSPeers(hasCap func(c tailcfg.NodeCapability) bool, self tailcfg.
// use a Set of NodeIDs to deduplicate, and populate into a []NodeView later.
var work map[string]set.Set[tailcfg.NodeID]
for _, peer := range peers {
if !peer.Valid() || !peer.Hostinfo().Valid() {
continue
}
if isConn, _ := peer.Hostinfo().AppConnector().Get(); !isConn {
if !isEligibleConnector(peer) {
continue
}
for _, t := range peer.Tags().All() {
@ -60,12 +95,7 @@ func PickSplitDNSPeers(hasCap func(c tailcfg.NodeCapability) bool, self tailcfg.
for id := range ids {
nodes = append(nodes, peers[id])
}
// The ordering of the nodes in the map vals is semantic (dnsConfigForNetmap uses the first node it can
// get a peer api url for as its split dns target). We can think of it as a preference order, except that
// we don't (currently 2026-01-14) have any preference over which node is chosen.
slices.SortFunc(nodes, func(a, b tailcfg.NodeView) int {
return cmp.Compare(a.ID(), b.ID())
})
sortByPreference(nodes)
mak.Set(&m, domain, nodes)
}
return m

@ -8,6 +8,8 @@ import (
"reflect"
"testing"
"github.com/google/go-cmp/cmp"
"tailscale.com/ipn/ipnext"
"tailscale.com/tailcfg"
"tailscale.com/types/appctype"
"tailscale.com/types/opt"
@ -131,3 +133,157 @@ func TestPickSplitDNSPeers(t *testing.T) {
})
}
}
// testNodeBackend is a minimal ipnext.NodeBackend for tests. It serves peers
// from a fixed slice; embedding the interface makes any unimplemented method
// panic if called.
type testNodeBackend struct {
	ipnext.NodeBackend
	peers []tailcfg.NodeView
}

// AppendMatchingPeers appends to base every configured peer for which pred
// returns true, mirroring the real backend's contract.
func (nb *testNodeBackend) AppendMatchingPeers(base []tailcfg.NodeView, pred func(tailcfg.NodeView) bool) []tailcfg.NodeView {
	for _, p := range nb.peers {
		if pred(p) {
			base = append(base, p)
		}
	}
	return base
}

// PeerHasPeerAPI reports that every peer has a peer API, which is sufficient
// for these tests.
func (nb *testNodeBackend) PeerHasPeerAPI(p tailcfg.NodeView) bool {
	return true
}
// TestPickConnector exercises PickConnector against a fake NodeBackend,
// covering empty inputs, invalid peers, peers without Hostinfo, peers that
// are not connectors, tag matching, and ordering of multiple matches.
func TestPickConnector(t *testing.T) {
	exampleApp := appctype.Conn25Attr{
		Name:       "example",
		Connectors: []string{"tag:example"},
		Domains:    []string{"example.com"},
	}
	// nvWithConnectorSet builds a NodeView whose Hostinfo explicitly sets
	// AppConnector to isConnector.
	nvWithConnectorSet := func(id tailcfg.NodeID, isConnector bool, tags ...string) tailcfg.NodeView {
		return (&tailcfg.Node{
			ID:       id,
			Tags:     tags,
			Hostinfo: (&tailcfg.Hostinfo{AppConnector: opt.NewBool(isConnector)}).View(),
		}).View()
	}
	// nv builds a NodeView that advertises itself as a connector.
	nv := func(id tailcfg.NodeID, tags ...string) tailcfg.NodeView {
		return nvWithConnectorSet(id, true, tags...)
	}
	for _, tt := range []struct {
		name       string
		candidates []tailcfg.NodeView
		app        appctype.Conn25Attr
		want       []tailcfg.NodeView
	}{
		{
			name:       "empty-everything",
			candidates: []tailcfg.NodeView{},
			app:        appctype.Conn25Attr{},
			want:       nil,
		},
		{
			name:       "empty-candidates",
			candidates: []tailcfg.NodeView{},
			app:        exampleApp,
			want:       nil,
		},
		{
			name:       "empty-app",
			candidates: []tailcfg.NodeView{nv(1, "tag:example")},
			app:        appctype.Conn25Attr{},
			want:       nil,
		},
		{
			name:       "one-matches",
			candidates: []tailcfg.NodeView{nv(1, "tag:example")},
			app:        exampleApp,
			want:       []tailcfg.NodeView{nv(1, "tag:example")},
		},
		{
			// A zero-value (invalid) NodeView must be skipped, not crash.
			name: "invalid-candidate",
			candidates: []tailcfg.NodeView{
				{},
				nv(1, "tag:example"),
			},
			app: exampleApp,
			want: []tailcfg.NodeView{
				nv(1, "tag:example"),
			},
		},
		{
			// A node with no Hostinfo cannot advertise as a connector.
			name: "no-host-info",
			candidates: []tailcfg.NodeView{
				(&tailcfg.Node{
					ID:   1,
					Tags: []string{"tag:example"},
				}).View(),
				nv(2, "tag:example"),
			},
			app:  exampleApp,
			want: []tailcfg.NodeView{nv(2, "tag:example")},
		},
		{
			name:       "not-a-connector",
			candidates: []tailcfg.NodeView{nvWithConnectorSet(1, false, "tag:example.com"), nv(2, "tag:example")},
			app:        exampleApp,
			want:       []tailcfg.NodeView{nv(2, "tag:example")},
		},
		{
			name:       "without-matches",
			candidates: []tailcfg.NodeView{nv(1, "tag:woo"), nv(2, "tag:example")},
			app:        exampleApp,
			want:       []tailcfg.NodeView{nv(2, "tag:example")},
		},
		{
			// Any one overlapping tag is enough to match.
			name:       "multi-tags",
			candidates: []tailcfg.NodeView{nv(1, "tag:woo", "tag:hoo"), nv(2, "tag:woo", "tag:example")},
			app:        exampleApp,
			want:       []tailcfg.NodeView{nv(2, "tag:woo", "tag:example")},
		},
		{
			// Multiple matches come back sorted by node ID.
			name:       "multi-matches",
			candidates: []tailcfg.NodeView{nv(1, "tag:woo", "tag:hoo"), nv(2, "tag:woo", "tag:example"), nv(3, "tag:example1", "tag:example")},
			app: appctype.Conn25Attr{
				Name:       "example2",
				Connectors: []string{"tag:example1", "tag:example"},
				Domains:    []string{"example.com"},
			},
			want: []tailcfg.NodeView{nv(2, "tag:woo", "tag:example"), nv(3, "tag:example1", "tag:example")},
		},
		{
			name: "bit-of-everything",
			candidates: []tailcfg.NodeView{
				nv(3, "tag:woo", "tag:hoo"),
				{},
				nv(2, "tag:woo", "tag:example"),
				nvWithConnectorSet(4, false, "tag:example"),
				nv(1, "tag:example1", "tag:example"),
				nv(7, "tag:example1", "tag:example"),
				nvWithConnectorSet(5, false),
				nv(6),
				nvWithConnectorSet(8, false, "tag:example"),
				nvWithConnectorSet(9, false),
				nvWithConnectorSet(10, false),
			},
			app: appctype.Conn25Attr{
				Name:       "example2",
				Connectors: []string{"tag:example1", "tag:example", "tag:example2"},
				Domains:    []string{"example.com"},
			},
			want: []tailcfg.NodeView{
				nv(1, "tag:example1", "tag:example"),
				nv(2, "tag:woo", "tag:example"),
				nv(7, "tag:example1", "tag:example"),
			},
		},
	} {
		t.Run(tt.name, func(t *testing.T) {
			got := PickConnector(&testNodeBackend{peers: tt.candidates}, tt.app)
			if diff := cmp.Diff(tt.want, got); diff != "" {
				t.Fatalf("PickConnectors (-want, +got):\n%s", diff)
			}
		})
	}
}

@ -820,7 +820,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/
tailscale.com/ipn from tailscale.com/client/local+
tailscale.com/ipn/conffile from tailscale.com/ipn/ipnlocal+
💣 tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnlocal+
tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal
tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal+
tailscale.com/ipn/ipnlocal from tailscale.com/ipn/localapi+
tailscale.com/ipn/ipnlocal/netmapcache from tailscale.com/ipn/ipnlocal
tailscale.com/ipn/ipnstate from tailscale.com/client/local+

@ -70,7 +70,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
tailscale.com/ipn from tailscale.com/cmd/tailscaled+
tailscale.com/ipn/conffile from tailscale.com/cmd/tailscaled+
tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnext+
tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal
tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal+
tailscale.com/ipn/ipnlocal from tailscale.com/cmd/tailscaled+
tailscale.com/ipn/ipnlocal/netmapcache from tailscale.com/ipn/ipnlocal
tailscale.com/ipn/ipnserver from tailscale.com/cmd/tailscaled

@ -85,7 +85,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
tailscale.com/ipn from tailscale.com/cmd/tailscaled+
tailscale.com/ipn/conffile from tailscale.com/cmd/tailscaled+
tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnext+
tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal
tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal+
tailscale.com/ipn/ipnlocal from tailscale.com/cmd/tailscaled+
tailscale.com/ipn/ipnlocal/netmapcache from tailscale.com/ipn/ipnlocal
tailscale.com/ipn/ipnserver from tailscale.com/cmd/tailscaled

@ -250,7 +250,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
gvisor.dev/gvisor/pkg/tcpip/transport/udp from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+
gvisor.dev/gvisor/pkg/waiter from gvisor.dev/gvisor/pkg/context+
tailscale.com from tailscale.com/version
tailscale.com/appc from tailscale.com/ipn/ipnlocal
tailscale.com/appc from tailscale.com/ipn/ipnlocal+
💣 tailscale.com/atomicfile from tailscale.com/ipn+
LD tailscale.com/chirp from tailscale.com/cmd/tailscaled
tailscale.com/client/local from tailscale.com/client/web+

@ -239,7 +239,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar
tailscale.com/ipn from tailscale.com/client/local+
tailscale.com/ipn/conffile from tailscale.com/ipn/ipnlocal+
💣 tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnext+
tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal
tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal+
tailscale.com/ipn/ipnlocal from tailscale.com/ipn/localapi+
tailscale.com/ipn/ipnlocal/netmapcache from tailscale.com/ipn/ipnlocal
tailscale.com/ipn/ipnstate from tailscale.com/client/local+

@ -8,8 +8,12 @@
package conn25
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/netip"
"slices"
@ -17,6 +21,7 @@ import (
"go4.org/netipx"
"golang.org/x/net/dns/dnsmessage"
"tailscale.com/appc"
"tailscale.com/feature"
"tailscale.com/ipn/ipnext"
"tailscale.com/ipn/ipnlocal"
@ -33,16 +38,30 @@ import (
// It is also the [extension] name and the log prefix.
const featureName = "conn25"
const maxBodyBytes = 1024 * 1024
// jsonDecode decodes all of a io.ReadCloser (eg an http.Request Body) into one pointer with best practices.
// It limits the size of bytes it will read.
// It either decodes all of the bytes into the pointer, or errors (unlike json.Decoder.Decode).
// It closes the ReadCloser after reading.
func jsonDecode(target any, rc io.ReadCloser) error {
defer rc.Close()
respBs, err := io.ReadAll(io.LimitReader(rc, maxBodyBytes+1))
if err != nil {
return err
}
err = json.Unmarshal(respBs, &target)
return err
}
func init() {
feature.Register(featureName)
newExtension := func(logf logger.Logf, sb ipnext.SafeBackend) (ipnext.Extension, error) {
e := &extension{
ipnext.RegisterExtension(featureName, func(logf logger.Logf, sb ipnext.SafeBackend) (ipnext.Extension, error) {
return &extension{
conn25: newConn25(logger.WithPrefix(logf, "conn25: ")),
backend: sb,
}
return e, nil
}
ipnext.RegisterExtension(featureName, newExtension)
}, nil
})
ipnlocal.RegisterPeerAPIHandler("/v0/connector/transit-ip", handleConnectorTransitIP)
}
@ -61,6 +80,9 @@ type extension struct {
conn25 *Conn25 // safe for concurrent access and only set at creation
backend ipnext.SafeBackend // safe for concurrent access and only set at creation
host ipnext.Host // set in Init, read-only after
ctxCancel context.CancelCauseFunc // cancels sendLoop goroutine
mu sync.Mutex // protects the fields below
isDNSHookRegistered bool
}
@ -72,17 +94,32 @@ func (e *extension) Name() string {
// Init implements [ipnext.Extension]. It registers the self-change hook and
// starts the background send loop. Calling Init more than once is a no-op.
func (e *extension) Init(host ipnext.Host) error {
	// Init only once: a non-nil ctxCancel means a previous call already
	// started the send loop.
	e.mu.Lock()
	defer e.mu.Unlock()
	if e.ctxCancel != nil {
		return nil
	}
	e.host = host
	host.Hooks().OnSelfChange.Add(e.onSelfChange)
	// ctxCancel is invoked by Shutdown to stop the sendLoop goroutine; the
	// cancel cause records why it stopped.
	ctx, cancel := context.WithCancelCause(context.Background())
	e.ctxCancel = cancel
	go e.sendLoop(ctx)
	return nil
}
// Shutdown implements [ipnlocal.Extension]. It stops the background sendLoop
// goroutine. It is safe to call even if Init was never called.
func (e *extension) Shutdown() error {
	// ctxCancel is written by Init under e.mu, so read it under the same
	// lock to avoid a data race.
	e.mu.Lock()
	cancel := e.ctxCancel
	e.mu.Unlock()
	if cancel != nil {
		cancel(errors.New("extension shutdown"))
	}
	// NOTE(review): we deliberately do not close addrsCh here. Only the
	// sender should close a channel, and enqueueAddressAssignment (a
	// sender, invoked from the DNS interception path) may still be running
	// when Shutdown is called; closing from this side risks a "send on
	// closed channel" panic. Cancelling the context is sufficient to stop
	// sendLoop.
	return nil
}
func (e *extension) handleConnectorTransitIP(h ipnlocal.PeerAPIHandler, w http.ResponseWriter, r *http.Request) {
const maxBodyBytes = 1024 * 1024
defer r.Body.Close()
if r.Method != "POST" {
http.Error(w, "Method should be POST", http.StatusMethodNotAllowed)
@ -172,7 +209,10 @@ func (c *Conn25) isConfigured() bool {
func newConn25(logf logger.Logf) *Conn25 {
c := &Conn25{
client: &client{logf: logf},
client: &client{
logf: logf,
addrsCh: make(chan addrs, 64),
},
connector: &connector{logf: logf},
}
return c
@ -310,7 +350,8 @@ const AppConnectorsExperimentalAttrName = "tailscale.com/app-connectors-experime
type config struct {
isConfigured bool
apps []appctype.Conn25Attr
appsByDomain map[dnsname.FQDN][]string
appsByName map[string]appctype.Conn25Attr
appNamesByDomain map[dnsname.FQDN][]string
selfRoutedDomains set.Set[dnsname.FQDN]
}
@ -326,7 +367,8 @@ func configFromNodeView(n tailcfg.NodeView) (config, error) {
cfg := config{
isConfigured: true,
apps: apps,
appsByDomain: map[dnsname.FQDN][]string{},
appsByName: map[string]appctype.Conn25Attr{},
appNamesByDomain: map[dnsname.FQDN][]string{},
selfRoutedDomains: set.Set[dnsname.FQDN]{},
}
for _, app := range apps {
@ -336,11 +378,12 @@ func configFromNodeView(n tailcfg.NodeView) (config, error) {
if err != nil {
return config{}, err
}
mak.Set(&cfg.appsByDomain, fqdn, append(cfg.appsByDomain[fqdn], app.Name))
mak.Set(&cfg.appNamesByDomain, fqdn, append(cfg.appNamesByDomain[fqdn], app.Name))
if selfMatchesTags {
cfg.selfRoutedDomains.Add(fqdn)
}
}
mak.Set(&cfg.appsByName, app.Name, app)
}
return cfg, nil
}
@ -350,7 +393,8 @@ func configFromNodeView(n tailcfg.NodeView) (config, error) {
// connectors.
// It's safe for concurrent use.
type client struct {
logf logger.Logf
logf logger.Logf
addrsCh chan addrs
mu sync.Mutex // protects the fields below
magicIPPool *ippool
@ -402,7 +446,7 @@ func (c *client) reconfig(newCfg config) error {
func (c *client) isConnectorDomain(domain dnsname.FQDN) bool {
c.mu.Lock()
defer c.mu.Unlock()
appNames, ok := c.config.appsByDomain[domain]
appNames, ok := c.config.appNamesByDomain[domain]
return ok && len(appNames) > 0
}
@ -416,7 +460,7 @@ func (c *client) reserveAddresses(domain dnsname.FQDN, dst netip.Addr) (addrs, e
if existing, ok := c.assignments.lookupByDomainDst(domain, dst); ok {
return existing, nil
}
appNames, _ := c.config.appsByDomain[domain]
appNames, _ := c.config.appNamesByDomain[domain]
// only reserve for first app
app := appNames[0]
mip, err := c.magicIPPool.next()
@ -437,12 +481,100 @@ func (c *client) reserveAddresses(domain dnsname.FQDN, dst netip.Addr) (addrs, e
if err := c.assignments.insert(as); err != nil {
return addrs{}, err
}
err = c.enqueueAddressAssignment(as)
if err != nil {
return addrs{}, err
}
return as, nil
}
func (c *client) enqueueAddressAssignment(addrs addrs) {
// TODO(fran) 2026-02-03 asynchronously send peerapi req to connector to
// allocate these addresses for us.
// sendLoop runs until ctx is cancelled, draining queued address assignments
// and sending each one to a connector over peerapi. A failed send is logged
// and the assignment is dropped; there is no retry here.
func (e *extension) sendLoop(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case as := <-e.conn25.client.addrsCh:
			if err := e.sendAddressAssignment(ctx, as); err != nil {
				e.conn25.client.logf("error sending transit IP assignment (app: %s, mip: %v, src: %v): %v", as.app, as.magic, as.dst, err)
			}
		}
	}
}
// enqueueAddressAssignment queues as to be sent to a connector by sendLoop.
// The send is non-blocking: if the queue is full the assignment is dropped
// and an error is returned.
//
// (The parameter is named as rather than addrs so it does not shadow the
// addrs type; callers are unaffected by the rename.)
func (c *client) enqueueAddressAssignment(as addrs) error {
	select {
	// TODO(fran) investigate the value of waiting for multiple addresses and sending them
	// in one ConnectorTransitIPRequest
	case c.addrsCh <- as:
		return nil
	default:
		c.logf("address assignment queue full, dropping transit assignment for %v", as.domain)
		return errors.New("queue full")
	}
}
// makePeerAPIReq POSTs a single transit-IP assignment for as to the
// connector's peerapi rooted at urlBase, and checks both the HTTP status and
// the per-IP response code.
func makePeerAPIReq(ctx context.Context, httpClient *http.Client, urlBase string, as addrs) error {
	url := urlBase + "/v0/connector/transit-ip"
	reqBody := ConnectorTransitIPRequest{
		TransitIPs: []TransitIPRequest{{
			TransitIP:     as.transit,
			DestinationIP: as.dst,
			App:           as.app,
		}},
	}
	bs, err := json.Marshal(reqBody)
	if err != nil {
		return fmt.Errorf("marshalling request: %w", err)
	}
	req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(bs))
	if err != nil {
		return fmt.Errorf("creating request: %w", err)
	}
	resp, err := httpClient.Do(req)
	if err != nil {
		return fmt.Errorf("sending request: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		// Drain (bounded) before closing so the transport can reuse the
		// underlying connection; previously the body was closed unread.
		io.Copy(io.Discard, io.LimitReader(resp.Body, maxBodyBytes))
		return fmt.Errorf("connector returned HTTP %d", resp.StatusCode)
	}
	var respBody ConnectorTransitIPResponse
	if err := jsonDecode(&respBody, resp.Body); err != nil {
		return fmt.Errorf("decoding response: %w", err)
	}
	// We sent exactly one TransitIPRequest, so only the first response
	// entry (if any) is inspected.
	if len(respBody.TransitIPs) > 0 && respBody.TransitIPs[0].Code != OK {
		return fmt.Errorf("connector error: %s", respBody.TransitIPs[0].Message)
	}
	return nil
}
// sendAddressAssignment resolves the app named by as, picks a connector peer
// that serves it, and sends the assignment to that peer over peerapi.
// Errors are returned (not logged) so the caller (sendLoop) logs them exactly
// once with full context.
func (e *extension) sendAddressAssignment(ctx context.Context, as addrs) error {
	c := e.conn25.client
	// c.config is guarded by c.mu (reconfig may swap it concurrently), so
	// take the lock for the lookup.
	c.mu.Lock()
	app, ok := c.config.appsByName[as.app]
	c.mu.Unlock()
	if !ok {
		return errors.New("app not found")
	}
	nb := e.host.NodeBackend()
	peers := appc.PickConnector(nb, app)
	// Use the first candidate we can get a peer api url for.
	var urlBase string
	for _, p := range peers {
		urlBase = nb.PeerAPIBase(p)
		if urlBase != "" {
			break
		}
	}
	if urlBase == "" {
		return errors.New("no connector peer found to handle address assignment")
	}
	client := e.backend.Sys().Dialer.Get().PeerAPIHTTPClient()
	return makePeerAPIReq(ctx, client, urlBase, as)
}
func (c *client) mapDNSResponse(buf []byte) []byte {
@ -501,7 +633,6 @@ func (c *client) mapDNSResponse(buf []byte) []byte {
c.logf("assigned connector addresses unexpectedly empty: %v", err)
return buf
}
c.enqueueAddressAssignment(addrs)
default:
if err := p.SkipAnswer(); err != nil {
c.logf("error parsing dns response: %v", err)

@ -5,17 +5,24 @@ package conn25
import (
"encoding/json"
"net/http"
"net/http/httptest"
"net/netip"
"reflect"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"go4.org/netipx"
"golang.org/x/net/dns/dnsmessage"
"tailscale.com/ipn/ipnext"
"tailscale.com/net/tsdial"
"tailscale.com/tailcfg"
"tailscale.com/tsd"
"tailscale.com/types/appctype"
"tailscale.com/types/logger"
"tailscale.com/types/opt"
"tailscale.com/util/dnsname"
"tailscale.com/util/must"
"tailscale.com/util/set"
@ -214,7 +221,7 @@ func TestReserveIPs(t *testing.T) {
c.client.transitIPPool = newIPPool(mustIPSetFromPrefix("169.254.0.0/24"))
mbd := map[dnsname.FQDN][]string{}
mbd["example.com."] = []string{"a"}
c.client.config.appsByDomain = mbd
c.client.config.appNamesByDomain = mbd
dst := netip.MustParseAddr("0.0.0.1")
addrs, err := c.client.reserveAddresses("example.com.", dst)
@ -340,7 +347,7 @@ func TestConfigReconfig(t *testing.T) {
if (err != nil) != tt.wantErr {
t.Fatalf("wantErr: %t, err: %v", tt.wantErr, err)
}
if diff := cmp.Diff(tt.wantAppsByDomain, c.appsByDomain); diff != "" {
if diff := cmp.Diff(tt.wantAppsByDomain, c.appNamesByDomain); diff != "" {
t.Errorf("appsByDomain diff (-want, +got):\n%s", diff)
}
if diff := cmp.Diff(tt.wantSelfRoutedDomains, c.selfRoutedDomains); diff != "" {
@ -499,7 +506,7 @@ func TestReserveAddressesDeduplicated(t *testing.T) {
c := newConn25(logger.Discard)
c.client.magicIPPool = newIPPool(mustIPSetFromPrefix("100.64.0.0/24"))
c.client.transitIPPool = newIPPool(mustIPSetFromPrefix("169.254.0.0/24"))
c.client.config.appsByDomain = map[dnsname.FQDN][]string{"example.com.": {"a"}}
c.client.config.appNamesByDomain = map[dnsname.FQDN][]string{"example.com.": {"a"}}
dst := netip.MustParseAddr("0.0.0.1")
first, err := c.client.reserveAddresses("example.com.", dst)
@ -522,3 +529,128 @@ func TestReserveAddressesDeduplicated(t *testing.T) {
t.Errorf("want 1 entry in byDomainDst, got %d", got)
}
}
// testNodeBackend is a minimal ipnext.NodeBackend for tests: it serves peers
// from a fixed slice and reports one shared peer API URL. Embedding the
// interface makes any unimplemented method panic if called.
type testNodeBackend struct {
	ipnext.NodeBackend
	peers      []tailcfg.NodeView
	peerAPIURL string // should be per peer but there's only one peer in our test so this is ok for now
}

// AppendMatchingPeers appends to base every configured peer for which pred
// returns true, mirroring the real backend's contract.
func (nb *testNodeBackend) AppendMatchingPeers(base []tailcfg.NodeView, pred func(tailcfg.NodeView) bool) []tailcfg.NodeView {
	for _, p := range nb.peers {
		if pred(p) {
			base = append(base, p)
		}
	}
	return base
}

// PeerHasPeerAPI reports that every peer has a peer API.
func (nb *testNodeBackend) PeerHasPeerAPI(p tailcfg.NodeView) bool {
	return true
}

// PeerAPIBase returns the shared test URL regardless of which peer is asked
// about.
func (nb *testNodeBackend) PeerAPIBase(p tailcfg.NodeView) string {
	return nb.peerAPIURL
}
// testHost is a minimal ipnext.Host for tests; it embeds the interface so any
// unimplemented method panics if called.
type testHost struct {
	ipnext.Host
	nb    ipnext.NodeBackend
	hooks ipnext.Hooks
}

// NodeBackend returns the fixed test backend.
func (h *testHost) NodeBackend() ipnext.NodeBackend { return h.nb }

// Hooks returns the host's hook set, which Init uses to register callbacks.
func (h *testHost) Hooks() *ipnext.Hooks { return &h.hooks }
// testSafeBackend is a minimal ipnext.SafeBackend for tests, exposing only a
// canned tsd.System.
type testSafeBackend struct {
	ipnext.SafeBackend
	sys *tsd.System
}

// Sys returns the canned system (the extension uses it for its Dialer).
func (b *testSafeBackend) Sys() *tsd.System { return b.sys }
// TestEnqueueAddress tests that after enqueueAddressAssignment has been
// called a peerapi request is made to a peer.
func TestEnqueueAddress(t *testing.T) {
	// make a fake peer to test against
	received := make(chan ConnectorTransitIPRequest, 1)
	peersAPI := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path != "/v0/connector/transit-ip" {
			http.Error(w, "unexpected path", http.StatusNotFound)
			return
		}
		var req ConnectorTransitIPRequest
		if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
			http.Error(w, "bad body", http.StatusBadRequest)
			return
		}
		received <- req
		resp := ConnectorTransitIPResponse{
			TransitIPs: []TransitIPResponse{{Code: OK}},
		}
		json.NewEncoder(w).Encode(resp)
	}))
	defer peersAPI.Close()
	// The peer advertises itself as an app connector tagged tag:woo, so
	// PickConnector will select it for the app configured below.
	connectorPeer := (&tailcfg.Node{
		ID:       tailcfg.NodeID(1),
		Tags:     []string{"tag:woo"},
		Hostinfo: (&tailcfg.Hostinfo{AppConnector: opt.NewBool(true)}).View(),
	}).View()
	// make extension to test
	sys := &tsd.System{}
	sys.Dialer.Set(&tsdial.Dialer{Logf: logger.Discard})
	ext := &extension{
		conn25:  newConn25(logger.Discard),
		backend: &testSafeBackend{sys: sys},
	}
	// Init starts the sendLoop goroutine that drains the queue.
	if err := ext.Init(&testHost{
		nb: &testNodeBackend{
			peers:      []tailcfg.NodeView{connectorPeer},
			peerAPIURL: peersAPI.URL,
		},
	}); err != nil {
		t.Fatal(err)
	}
	defer ext.Shutdown()
	sn := makeSelfNode(t, appctype.Conn25Attr{
		Name:       "app1",
		Connectors: []string{"tag:woo"},
		Domains:    []string{"example.com"},
	}, []string{})
	err := ext.conn25.reconfig(sn)
	if err != nil {
		t.Fatal(err)
	}
	as := addrs{
		dst:     netip.MustParseAddr("1.2.3.4"),
		magic:   netip.MustParseAddr("100.64.0.0"),
		transit: netip.MustParseAddr("169.254.0.1"),
		domain:  "example.com.",
		app:     "app1",
	}
	ext.conn25.client.enqueueAddressAssignment(as)
	// The send happens asynchronously in sendLoop, so wait for the fake
	// connector to receive the request.
	select {
	case got := <-received:
		if len(got.TransitIPs) != 1 {
			t.Fatalf("want 1 TransitIP in request, got %d", len(got.TransitIPs))
		}
		tip := got.TransitIPs[0]
		if tip.TransitIP != as.transit {
			t.Errorf("TransitIP: got %v, want %v", tip.TransitIP, as.transit)
		}
		if tip.DestinationIP != as.dst {
			t.Errorf("DestinationIP: got %v, want %v", tip.DestinationIP, as.dst)
		}
		if tip.App != as.app {
			t.Errorf("App: got %q, want %q", tip.App, as.app)
		}
	case <-time.After(5 * time.Second):
		t.Fatal("timed out waiting for connector to receive request")
	}
}

@ -235,7 +235,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware)
tailscale.com/ipn from tailscale.com/client/local+
tailscale.com/ipn/conffile from tailscale.com/ipn/ipnlocal+
💣 tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnext+
tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal
tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal+
tailscale.com/ipn/ipnlocal from tailscale.com/ipn/localapi+
tailscale.com/ipn/ipnlocal/netmapcache from tailscale.com/ipn/ipnlocal
tailscale.com/ipn/ipnstate from tailscale.com/client/local+

Loading…
Cancel
Save