tstest/integration: add integration test for Tailnet Lock
This patch adds an integration test for Tailnet Lock, checking that a node
can't talk to peers in the tailnet until it is signed.

This patch also introduces a new package `tstest/tkatest`, which has some
helpers for constructing a mock control server that responds to TKA
requests. This allows us to reduce boilerplate in the IPN tests.

Updates tailscale/corp#33599

Signed-off-by: Alex Chan <alexc@tailscale.com>
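The `tstest/tkatest` package itself isn't part of this excerpt; only its call
sites in the mock control server appear below. Inferred from those call sites
(a sketch of the helpers' shapes, not the package's actual signatures):

	// Hypothetical shapes, reconstructed from the serveTKA* handlers below.
	HandleTKAInitBegin(w, r, nodes)              -> (*tka.AUM, error) // returns the genesis AUM
	HandleTKAInitFinish(w, r)                    -> (map[tailcfg.NodeID]tkatype.MarshaledSignature, error)
	HandleTKABootstrap(w, r, resp)               -> (_, error) // writes resp; first result unused at the call site
	HandleTKASyncOffer(w, r, authority, storage) -> error
	HandleTKASign(w, r, authority)               -> (*tkatype.MarshaledSignature, *key.NodePublic, error)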
@@ -918,7 +918,7 @@ func (n *TestNode) Ping(otherNode *TestNode) error {
	t := n.env.t
	ip := otherNode.AwaitIP4().String()
	t.Logf("Running ping %v (from %v)...", ip, n.AwaitIP4())
-	return n.Tailscale("ping", ip).Run()
+	return n.Tailscale("ping", "--timeout=1s", ip).Run()
}

// AwaitListening waits for the tailscaled to be serving local clients
@@ -1077,6 +1077,46 @@ func (n *TestNode) MustStatus() *ipnstate.Status {
	return st
}

// PublicKey returns the hex-encoded public key of this node,
// e.g. `nodekey:123456abc`
func (n *TestNode) PublicKey() string {
	tb := n.env.t
	tb.Helper()
	cmd := n.Tailscale("status", "--json")
	out, err := cmd.CombinedOutput()
	if err != nil {
		tb.Fatalf("running `tailscale status`: %v, %s", err, out)
	}

	type Self struct{ PublicKey string }
	type StatusOutput struct{ Self Self }

	var st StatusOutput
	if err := json.Unmarshal(out, &st); err != nil {
		tb.Fatalf("decoding `tailscale status` JSON: %v\njson:\n%s", err, out)
	}
	return st.Self.PublicKey
}

// NLPublicKey returns the hex-encoded network lock public key of
// this node, e.g. `tlpub:123456abc`
func (n *TestNode) NLPublicKey() string {
	tb := n.env.t
	tb.Helper()
	cmd := n.Tailscale("lock", "status", "--json")
	out, err := cmd.CombinedOutput()
	if err != nil {
		tb.Fatalf("running `tailscale lock status`: %v, %s", err, out)
	}
	st := struct {
		PublicKey string `json:"PublicKey"`
	}{}
	if err := json.Unmarshal(out, &st); err != nil {
		tb.Fatalf("decoding `tailscale lock status` JSON: %v\njson:\n%s", err, out)
	}
	return st.PublicKey
}

// trafficTrap is an HTTP proxy handler to note whether any
// HTTP traffic tries to leave localhost from tailscaled. We don't
// expect any, so any request triggers a failure.

@@ -2253,7 +2253,7 @@ func TestC2NDebugNetmap(t *testing.T) {
	}
}

-func TestNetworkLock(t *testing.T) {
+func TestTailnetLock(t *testing.T) {

	// If you run `tailscale lock log` on a node where Tailnet Lock isn't
	// enabled, you get an error explaining that.
@@ -2291,6 +2291,79 @@ func TestNetworkLock(t *testing.T) {
			t.Fatalf("stderr: want %q, got %q", wantErr, errBuf.String())
		}
	})

	// If you create a tailnet with two signed nodes and one unsigned,
	// the signed nodes can talk to each other but the unsigned node cannot
	// talk to anybody.
	t.Run("node-connectivity", func(t *testing.T) {
		tstest.Shard(t)
		t.Parallel()

		env := NewTestEnv(t)
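		// Advertise the Tailnet Lock capability to every node,
		// so clients treat the feature as enabled for this tailnet.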
		env.Control.DefaultNodeCapabilities = &tailcfg.NodeCapMap{
			tailcfg.CapabilityTailnetLock: []tailcfg.RawMessage{},
		}

		// Start two nodes which will be our signing nodes.
		signing1 := NewTestNode(t, env)
		signing2 := NewTestNode(t, env)

		nodes := []*TestNode{signing1, signing2}
		for _, n := range nodes {
			d := n.StartDaemon()
			defer d.MustCleanShutdown(t)

			n.MustUp()
			n.AwaitRunning()
		}

		// Initiate Tailnet Lock with the two signing nodes.
		initCmd := signing1.Tailscale("lock", "init",
			"--gen-disablements", "10",
			"--confirm",
			signing1.NLPublicKey(), signing2.NLPublicKey(),
		)
		out, err := initCmd.CombinedOutput()
		if err != nil {
			t.Fatalf("init command failed: %q\noutput=%v", err, string(out))
		}

		// Check that the two signing nodes can ping each other
		if err := signing1.Ping(signing2); err != nil {
			t.Fatalf("ping signing1 -> signing2: %v", err)
		}
		if err := signing2.Ping(signing1); err != nil {
			t.Fatalf("ping signing2 -> signing1: %v", err)
		}

		// Create and start a third node
		node3 := NewTestNode(t, env)
		d3 := node3.StartDaemon()
		defer d3.MustCleanShutdown(t)
		node3.MustUp()
		node3.AwaitRunning()
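
		// node3 isn't signed yet, so pings in both directions should fail.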
		if err := signing1.Ping(node3); err == nil {
			t.Fatal("ping signing1 -> node3: expected err, but succeeded")
		}
		if err := node3.Ping(signing1); err == nil {
			t.Fatal("ping node3 -> signing1: expected err, but succeeded")
		}

		// Sign node3, and check the nodes can now talk to each other
		signCmd := signing1.Tailscale("lock", "sign", node3.PublicKey())
		out, err = signCmd.CombinedOutput()
		if err != nil {
			t.Fatalf("sign command failed: %q\noutput = %v", err, string(out))
		}

		if err := signing1.Ping(node3); err != nil {
			t.Fatalf("ping signing1 -> node3: expected success, got err: %v", err)
		}
		if err := node3.Ping(signing1); err != nil {
			t.Fatalf("ping node3 -> signing1: expected success, got err: %v", err)
		}
	})
}

func TestNodeWithBadStateFile(t *testing.T) {

@@ -33,6 +33,8 @@ import (
	"tailscale.com/net/tsaddr"
	"tailscale.com/syncs"
	"tailscale.com/tailcfg"
	"tailscale.com/tka"
	"tailscale.com/tstest/tkatest"
	"tailscale.com/types/key"
	"tailscale.com/types/logger"
	"tailscale.com/types/opt"
@@ -123,6 +125,10 @@ type Server struct {
	nodeKeyAuthed set.Set[key.NodePublic]
	msgToSend map[key.NodePublic]any // value is *tailcfg.PingRequest or entire *tailcfg.MapResponse
	allExpired bool // All nodes will be told their node key is expired.

	// tkaStorage records the Tailnet Lock state, if any.
	// If nil, Tailnet Lock is not enabled in the tailnet.
	tkaStorage tka.CompactableChonk
}

// BaseURL returns the server's base URL, without trailing slash.
@@ -329,6 +335,7 @@ func (s *Server) initMux() {
		w.WriteHeader(http.StatusNoContent)
	})
	s.mux.HandleFunc("/key", s.serveKey)
	s.mux.HandleFunc("/machine/tka/", s.serveTKA)
	s.mux.HandleFunc("/machine/", s.serveMachine)
	s.mux.HandleFunc("/ts2021", s.serveNoiseUpgrade)
	s.mux.HandleFunc("/c2n/", s.serveC2N)
@@ -439,7 +446,7 @@ func (s *Server) serveKey(w http.ResponseWriter, r *http.Request) {

func (s *Server) serveMachine(w http.ResponseWriter, r *http.Request) {
	if r.Method != "POST" {
-		http.Error(w, "POST required", 400)
+		http.Error(w, "POST required for serveMachine", 400)
		return
	}
	ctx := r.Context()
@@ -861,6 +868,132 @@ func (s *Server) serveRegister(w http.ResponseWriter, r *http.Request, mkey key.
	w.Write(res)
}

func (s *Server) serveTKA(w http.ResponseWriter, r *http.Request) {
	if r.Method != "GET" {
		http.Error(w, "GET required for serveTKA", 400)
		return
	}

	switch r.URL.Path {
	case "/machine/tka/init/begin":
		s.serveTKAInitBegin(w, r)
	case "/machine/tka/init/finish":
		s.serveTKAInitFinish(w, r)
	case "/machine/tka/bootstrap":
		s.serveTKABootstrap(w, r)
	case "/machine/tka/sync/offer":
		s.serveTKASyncOffer(w, r)
	case "/machine/tka/sign":
		s.serveTKASign(w, r)
	default:
		s.serveUnhandled(w, r)
	}
}

func (s *Server) serveTKAInitBegin(w http.ResponseWriter, r *http.Request) {
	s.mu.Lock()
	defer s.mu.Unlock()

	nodes := maps.Values(s.nodes)
	genesisAUM, err := tkatest.HandleTKAInitBegin(w, r, nodes)
	if err != nil {
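		// Panic on a fresh goroutine (here and in the other serveTKA*
		// handlers) so the failure escapes net/http's per-handler panic
		// recovery and actually crashes the test binary.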
		go panic(fmt.Sprintf("HandleTKAInitBegin: %v", err))
	}
	s.tkaStorage = tka.ChonkMem()
	s.tkaStorage.CommitVerifiedAUMs([]tka.AUM{*genesisAUM})
}

func (s *Server) serveTKAInitFinish(w http.ResponseWriter, r *http.Request) {
	signatures, err := tkatest.HandleTKAInitFinish(w, r)
	if err != nil {
		go panic(fmt.Sprintf("HandleTKAInitFinish: %v", err))
	}

	s.mu.Lock()
	defer s.mu.Unlock()

	// Apply the signatures to each of the nodes. Because s.nodes is keyed
	// by public key instead of node ID, we have to do this inefficiently.
	//
	// We only have small tailnets in the integration tests, so this isn't
	// much of an issue.
	for nodeID, sig := range signatures {
		for _, n := range s.nodes {
			if n.ID == nodeID {
				n.KeySignature = sig
			}
		}
	}
}

func (s *Server) serveTKABootstrap(w http.ResponseWriter, r *http.Request) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.tkaStorage == nil {
		http.Error(w, "no TKA state when calling serveTKABootstrap", 400)
		return
	}

	// Find the genesis AUM, which we need to include in the response.
	var genesis *tka.AUM
	allAUMs, err := s.tkaStorage.AllAUMs()
	if err != nil {
		http.Error(w, "unable to retrieve all AUMs from TKA state", 500)
		return
	}
	for _, h := range allAUMs {
		aum := must.Get(s.tkaStorage.AUM(h))
		if _, hasParent := aum.Parent(); !hasParent {
			genesis = &aum
			break
		}
	}
	if genesis == nil {
		http.Error(w, "unable to find genesis AUM in TKA state", 500)
		return
	}

	resp := tailcfg.TKABootstrapResponse{
		GenesisAUM: genesis.Serialize(),
	}
	_, err = tkatest.HandleTKABootstrap(w, r, resp)
	if err != nil {
		go panic(fmt.Sprintf("HandleTKABootstrap: %v", err))
	}
}

func (s *Server) serveTKASyncOffer(w http.ResponseWriter, r *http.Request) {
	s.mu.Lock()
	defer s.mu.Unlock()
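
	// Reopen the authority from the stored AUM chain so the sync helper
	// can answer with the server's current TKA state.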
	authority, err := tka.Open(s.tkaStorage)
	if err != nil {
		go panic(fmt.Sprintf("serveTKASyncOffer: tka.Open: %v", err))
	}

	err = tkatest.HandleTKASyncOffer(w, r, authority, s.tkaStorage)
	if err != nil {
		go panic(fmt.Sprintf("HandleTKASyncOffer: %v", err))
	}
}

func (s *Server) serveTKASign(w http.ResponseWriter, r *http.Request) {
	s.mu.Lock()
	defer s.mu.Unlock()

	authority, err := tka.Open(s.tkaStorage)
	if err != nil {
		go panic(fmt.Sprintf("serveTKASign: tka.Open: %v", err))
	}

	sig, keyBeingSigned, err := tkatest.HandleTKASign(w, r, authority)
	if err != nil {
		go panic(fmt.Sprintf("HandleTKASign: %v", err))
	}
	s.nodes[*keyBeingSigned].KeySignature = *sig
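	// Wake up any long-polling map requests so nodes learn about the
	// new signature promptly.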
	s.updateLocked("TKASign", s.nodeIDsLocked(0))
}

// updateType indicates why a long-polling map request is being woken
// up for an update.
type updateType int
@@ -1197,6 +1330,21 @@ func (s *Server) MapResponse(req *tailcfg.MapRequest) (res *tailcfg.MapResponse,
		v6Prefix,
	}

	// If the server is tracking TKA state, and there's a single TKA head,
	// add it to the MapResponse.
	if s.tkaStorage != nil {
		heads, err := s.tkaStorage.Heads()
		if err != nil {
			log.Printf("unable to get TKA heads: %v", err)
		} else if len(heads) != 1 {
			log.Printf("unable to get single TKA head, got %v", heads)
		} else {
			res.TKAInfo = &tailcfg.TKAInfo{
				Head: heads[0].Hash().String(),
			}
		}
	}

	s.mu.Lock()
	defer s.mu.Unlock()
	res.Node.PrimaryRoutes = s.nodeSubnetRoutes[nk]