feature/featuretags: add build tag to remove captive portal detection
This doesn't yet fully pull it out into a feature/captiveportal package. This is the usual first step: moving the code to its own files within the same packages.

Updates #17254
Change-Id: Idfaec839debf7c96f51ca6520ce36ccf2f8eec92
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
committed by Brad Fitzpatrick
parent 4657cbdb11
commit 8fe575409f
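The diff below leans on two mechanisms: a buildfeatures constant that guards call sites, and feature.Hook registration from the tag-guarded file. A minimal sketch of how the constant side of such a knob is typically wired up as a pair of tag-guarded files (the file names and comments are illustrative, not part of this commit):

// File: captiveportal_enabled.go (hypothetical name)
//go:build !ts_omit_captiveportal

package buildfeatures

// HasCaptivePortal reports whether captive portal detection is compiled in.
const HasCaptivePortal = true

// File: captiveportal_disabled.go (hypothetical name)
//go:build ts_omit_captiveportal

package buildfeatures

// HasCaptivePortal is false under -tags ts_omit_captiveportal, so guards
// like "if buildfeatures.HasCaptivePortal && ..." compile away entirely.
const HasCaptivePortal = false

Because HasCaptivePortal is a boolean constant, the compiler eliminates the guarded branches, and the linker can then drop the unreferenced captive portal detection code.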
@@ -0,0 +1,186 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+//go:build !ts_omit_captiveportal
+
+package ipnlocal
+
+import (
+	"context"
+	"time"
+
+	"tailscale.com/health"
+	"tailscale.com/net/captivedetection"
+	"tailscale.com/util/clientmetric"
+)
+
+func init() {
+	hookCaptivePortalHealthChange.Set(captivePortalHealthChange)
+	hookCheckCaptivePortalLoop.Set(checkCaptivePortalLoop)
+}
+
+var metricCaptivePortalDetected = clientmetric.NewCounter("captiveportal_detected")
+
+// captivePortalDetectionInterval is the duration to wait in an unhealthy state with connectivity broken
+// before running captive portal detection.
+const captivePortalDetectionInterval = 2 * time.Second
+
+func captivePortalHealthChange(b *LocalBackend, state *health.State) {
+	isConnectivityImpacted := false
+	for _, w := range state.Warnings {
+		// Ignore the captive portal warnable itself.
+		if w.ImpactsConnectivity && w.WarnableCode != captivePortalWarnable.Code {
+			isConnectivityImpacted = true
+			break
+		}
+	}
+
+	// captiveCtx can be changed, and is protected with 'mu'; grab that
+	// before we start our select, below.
+	//
+	// It is guaranteed to be non-nil.
+	b.mu.Lock()
+	ctx := b.captiveCtx
+	b.mu.Unlock()
+
+	// If the context is canceled, we don't need to do anything.
+	if ctx.Err() != nil {
+		return
+	}
+
+	if isConnectivityImpacted {
+		b.logf("health: connectivity impacted; triggering captive portal detection")
+
+		// Ensure that we select on captiveCtx so that we can time out
+		// triggering captive portal detection if the backend is shutdown.
+		select {
+		case b.needsCaptiveDetection <- true:
+		case <-ctx.Done():
+		}
+	} else {
+		// If connectivity is not impacted, we know for sure we're not behind a captive portal,
+		// so drop any warning, and signal that we don't need captive portal detection.
+		b.health.SetHealthy(captivePortalWarnable)
+		select {
+		case b.needsCaptiveDetection <- false:
+		case <-ctx.Done():
+		}
+	}
+}
+
+// captivePortalWarnable is a Warnable which is set to an unhealthy state when a captive portal is detected.
+var captivePortalWarnable = health.Register(&health.Warnable{
+	Code:  "captive-portal-detected",
+	Title: "Captive portal detected",
+	// High severity, because captive portals block all traffic and require user intervention.
+	Severity:            health.SeverityHigh,
+	Text:                health.StaticMessage("This network requires you to log in using your web browser."),
+	ImpactsConnectivity: true,
+})
+
+func checkCaptivePortalLoop(b *LocalBackend, ctx context.Context) {
+	var tmr *time.Timer
+
+	maybeStartTimer := func() {
+		// If there's an existing timer, nothing to do; just continue
+		// waiting for it to expire. Otherwise, create a new timer.
+		if tmr == nil {
+			tmr = time.NewTimer(captivePortalDetectionInterval)
+		}
+	}
+	maybeStopTimer := func() {
+		if tmr == nil {
+			return
+		}
+		if !tmr.Stop() {
+			<-tmr.C
+		}
+		tmr = nil
+	}
+
+	for {
+		if ctx.Err() != nil {
+			maybeStopTimer()
+			return
+		}
+
+		// First, see if we have a signal on our "healthy" channel, which
+		// takes priority over an existing timer. Because a select is
+		// nondeterministic, we explicitly check this channel before
+		// entering the main select below, so that we're guaranteed to
+		// stop the timer before starting captive portal detection.
+		select {
+		case needsCaptiveDetection := <-b.needsCaptiveDetection:
+			if needsCaptiveDetection {
+				maybeStartTimer()
+			} else {
+				maybeStopTimer()
+			}
+		default:
+		}
+
+		var timerChan <-chan time.Time
+		if tmr != nil {
+			timerChan = tmr.C
+		}
+		select {
+		case <-ctx.Done():
+			// All done; stop the timer and then exit.
+			maybeStopTimer()
+			return
+		case <-timerChan:
+			// Kick off captive portal check
+			b.performCaptiveDetection()
+			// nil out timer to force recreation
+			tmr = nil
+		case needsCaptiveDetection := <-b.needsCaptiveDetection:
+			if needsCaptiveDetection {
+				maybeStartTimer()
+			} else {
+				// Healthy; cancel any existing timer
+				maybeStopTimer()
+			}
+		}
+	}
+}
+
+// shouldRunCaptivePortalDetection reports whether captive portal detection
+// should be run. It is enabled by default, but can be disabled via a control
+// knob. It is also only run when the user explicitly wants the backend to be
+// running.
+func (b *LocalBackend) shouldRunCaptivePortalDetection() bool {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	return !b.ControlKnobs().DisableCaptivePortalDetection.Load() && b.pm.prefs.WantRunning()
+}
+
+// performCaptiveDetection checks if captive portal detection is enabled via controlknob. If so, it runs
+// the detection and updates the Warnable accordingly.
+func (b *LocalBackend) performCaptiveDetection() {
+	if !b.shouldRunCaptivePortalDetection() {
+		return
+	}
+
+	d := captivedetection.NewDetector(b.logf)
+	b.mu.Lock() // for b.hostinfo
+	cn := b.currentNode()
+	dm := cn.DERPMap()
+	preferredDERP := 0
+	if b.hostinfo != nil {
+		if b.hostinfo.NetInfo != nil {
+			preferredDERP = b.hostinfo.NetInfo.PreferredDERP
+		}
+	}
+	ctx := b.ctx
+	netMon := b.NetMon()
+	b.mu.Unlock()
+	found := d.Detect(ctx, netMon, dm, preferredDERP)
+	if found {
+		if !b.health.IsUnhealthy(captivePortalWarnable) {
+			metricCaptivePortalDetected.Add(1)
+		}
+		b.health.SetUnhealthy(captivePortalWarnable, health.Args{})
+	} else {
+		b.health.SetHealthy(captivePortalWarnable)
+	}
+}
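The file above registers its implementations in init() via feature.Hook values declared in the next file (see the hunks below), so the call sites never name the captive portal functions directly. A minimal sketch of a hook type with the Set/Get/GetOk surface used here (an illustration of the pattern; the real tailscale.com/feature implementation may differ):

package feature

// Hook is a single optional implementation of a feature,
// installed at most once at init time.
type Hook[Func any] struct {
	ok bool
	f  Func
}

// Set installs f as the hook's implementation.
// It panics if called more than once.
func (h *Hook[Func]) Set(f Func) {
	if h.ok {
		panic("Set called twice")
	}
	h.f = f
	h.ok = true
}

// Get returns the installed implementation, panicking if none was set.
// Callers are expected to guard it with a buildfeatures constant.
func (h *Hook[Func]) Get() Func {
	if !h.ok {
		panic("hook not set")
	}
	return h.f
}

// GetOk returns the installed implementation, if any,
// and whether one was installed.
func (h *Hook[Func]) GetOk() (Func, bool) {
	return h.f, h.ok
}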
+15 -168
@@ -64,7 +64,6 @@ import (
 	"tailscale.com/ipn/policy"
 	"tailscale.com/log/sockstatlog"
 	"tailscale.com/logpolicy"
-	"tailscale.com/net/captivedetection"
 	"tailscale.com/net/dns"
 	"tailscale.com/net/dnscache"
 	"tailscale.com/net/dnsfallback"
@@ -168,8 +167,6 @@ type watchSession struct {
 	cancel context.CancelFunc // to shut down the session
 }

-var metricCaptivePortalDetected = clientmetric.NewCounter("captiveportal_detected")
-
 var (
 	// errShutdown indicates that the [LocalBackend.Shutdown] was called.
 	errShutdown = errors.New("shutting down")
@@ -943,10 +940,6 @@ func (b *LocalBackend) DisconnectControl() {
 	cc.Shutdown()
 }

-// captivePortalDetectionInterval is the duration to wait in an unhealthy state with connectivity broken
-// before running captive portal detection.
-const captivePortalDetectionInterval = 2 * time.Second
-
 // linkChange is our network monitor callback, called whenever the network changes.
 func (b *LocalBackend) linkChange(delta *netmon.ChangeDelta) {
 	b.mu.Lock()
@@ -1002,6 +995,12 @@ func (b *LocalBackend) linkChange(delta *netmon.ChangeDelta) {
 	}
 }

+// Captive portal detection hooks.
+var (
+	hookCaptivePortalHealthChange feature.Hook[func(*LocalBackend, *health.State)]
+	hookCheckCaptivePortalLoop    feature.Hook[func(*LocalBackend, context.Context)]
+)
+
 func (b *LocalBackend) onHealthChange(change health.Change) {
 	if change.WarnableChanged {
 		w := change.Warnable
@@ -1019,45 +1018,8 @@ func (b *LocalBackend) onHealthChange(change health.Change) {
 		Health: state,
 	})

-	isConnectivityImpacted := false
-	for _, w := range state.Warnings {
-		// Ignore the captive portal warnable itself.
-		if w.ImpactsConnectivity && w.WarnableCode != captivePortalWarnable.Code {
-			isConnectivityImpacted = true
-			break
-		}
-	}
-
-	// captiveCtx can be changed, and is protected with 'mu'; grab that
-	// before we start our select, below.
-	//
-	// It is guaranteed to be non-nil.
-	b.mu.Lock()
-	ctx := b.captiveCtx
-	b.mu.Unlock()
-
-	// If the context is canceled, we don't need to do anything.
-	if ctx.Err() != nil {
-		return
-	}
-
-	if isConnectivityImpacted {
-		b.logf("health: connectivity impacted; triggering captive portal detection")
-
-		// Ensure that we select on captiveCtx so that we can time out
-		// triggering captive portal detection if the backend is shutdown.
-		select {
-		case b.needsCaptiveDetection <- true:
-		case <-ctx.Done():
-		}
-	} else {
-		// If connectivity is not impacted, we know for sure we're not behind a captive portal,
-		// so drop any warning, and signal that we don't need captive portal detection.
-		b.health.SetHealthy(captivePortalWarnable)
-		select {
-		case b.needsCaptiveDetection <- false:
-		case <-ctx.Done():
-		}
-	}
+	if f, ok := hookCaptivePortalHealthChange.GetOk(); ok {
+		f(b, state)
+	}
 }
@@ -1115,7 +1077,7 @@ func (b *LocalBackend) Shutdown() {
 	}
 	b.shutdownCalled = true

-	if b.captiveCancel != nil {
+	if buildfeatures.HasCaptivePortal && b.captiveCancel != nil {
 		b.logf("canceling captive portal context")
 		b.captiveCancel()
 	}
@@ -2767,123 +2729,6 @@ func (b *LocalBackend) updateFilterLocked(prefs ipn.PrefsView) {
 	}
 }

-// captivePortalWarnable is a Warnable which is set to an unhealthy state when a captive portal is detected.
-var captivePortalWarnable = health.Register(&health.Warnable{
-	Code:  "captive-portal-detected",
-	Title: "Captive portal detected",
-	// High severity, because captive portals block all traffic and require user intervention.
-	Severity:            health.SeverityHigh,
-	Text:                health.StaticMessage("This network requires you to log in using your web browser."),
-	ImpactsConnectivity: true,
-})
-
-func (b *LocalBackend) checkCaptivePortalLoop(ctx context.Context) {
-	var tmr *time.Timer
-
-	maybeStartTimer := func() {
-		// If there's an existing timer, nothing to do; just continue
-		// waiting for it to expire. Otherwise, create a new timer.
-		if tmr == nil {
-			tmr = time.NewTimer(captivePortalDetectionInterval)
-		}
-	}
-	maybeStopTimer := func() {
-		if tmr == nil {
-			return
-		}
-		if !tmr.Stop() {
-			<-tmr.C
-		}
-		tmr = nil
-	}
-
-	for {
-		if ctx.Err() != nil {
-			maybeStopTimer()
-			return
-		}
-
-		// First, see if we have a signal on our "healthy" channel, which
-		// takes priority over an existing timer. Because a select is
-		// nondeterministic, we explicitly check this channel before
-		// entering the main select below, so that we're guaranteed to
-		// stop the timer before starting captive portal detection.
-		select {
-		case needsCaptiveDetection := <-b.needsCaptiveDetection:
-			if needsCaptiveDetection {
-				maybeStartTimer()
-			} else {
-				maybeStopTimer()
-			}
-		default:
-		}
-
-		var timerChan <-chan time.Time
-		if tmr != nil {
-			timerChan = tmr.C
-		}
-		select {
-		case <-ctx.Done():
-			// All done; stop the timer and then exit.
-			maybeStopTimer()
-			return
-		case <-timerChan:
-			// Kick off captive portal check
-			b.performCaptiveDetection()
-			// nil out timer to force recreation
-			tmr = nil
-		case needsCaptiveDetection := <-b.needsCaptiveDetection:
-			if needsCaptiveDetection {
-				maybeStartTimer()
-			} else {
-				// Healthy; cancel any existing timer
-				maybeStopTimer()
-			}
-		}
-	}
-}
-
-// performCaptiveDetection checks if captive portal detection is enabled via controlknob. If so, it runs
-// the detection and updates the Warnable accordingly.
-func (b *LocalBackend) performCaptiveDetection() {
-	if !b.shouldRunCaptivePortalDetection() {
-		return
-	}
-
-	d := captivedetection.NewDetector(b.logf)
-	b.mu.Lock() // for b.hostinfo
-	cn := b.currentNode()
-	dm := cn.DERPMap()
-	preferredDERP := 0
-	if b.hostinfo != nil {
-		if b.hostinfo.NetInfo != nil {
-			preferredDERP = b.hostinfo.NetInfo.PreferredDERP
-		}
-	}
-	ctx := b.ctx
-	netMon := b.NetMon()
-	b.mu.Unlock()
-	found := d.Detect(ctx, netMon, dm, preferredDERP)
-	if found {
-		if !b.health.IsUnhealthy(captivePortalWarnable) {
-			metricCaptivePortalDetected.Add(1)
-		}
-		b.health.SetUnhealthy(captivePortalWarnable, health.Args{})
-	} else {
-		b.health.SetHealthy(captivePortalWarnable)
-	}
-}
-
-// shouldRunCaptivePortalDetection reports whether captive portal detection
-// should be run. It is enabled by default, but can be disabled via a control
-// knob. It is also only run when the user explicitly wants the backend to be
-// running.
-func (b *LocalBackend) shouldRunCaptivePortalDetection() bool {
-	b.mu.Lock()
-	defer b.mu.Unlock()
-	return !b.ControlKnobs().DisableCaptivePortalDetection.Load() && b.pm.prefs.WantRunning()
-}
-
 // packetFilterPermitsUnlockedNodes reports any peer in peers with the
 // UnsignedPeerAPIOnly bool set true has any of its allowed IPs in the packet
 // filter.
@@ -5715,16 +5560,18 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock
 		// Start a captive portal detection loop if none has been
 		// started. Create a new context if none is present, since it
 		// can be shut down if we transition away from Running.
-		if b.captiveCancel == nil {
-			b.captiveCtx, b.captiveCancel = context.WithCancel(b.ctx)
-			b.goTracker.Go(func() { b.checkCaptivePortalLoop(b.captiveCtx) })
+		if buildfeatures.HasCaptivePortal {
+			if b.captiveCancel == nil {
+				b.captiveCtx, b.captiveCancel = context.WithCancel(b.ctx)
+				b.goTracker.Go(func() { hookCheckCaptivePortalLoop.Get()(b, b.captiveCtx) })
+			}
 		}
 	} else if oldState == ipn.Running {
 		// Transitioning away from running.
 		b.closePeerAPIListenersLocked()

 		// Stop any existing captive portal detection loop.
-		if b.captiveCancel != nil {
+		if buildfeatures.HasCaptivePortal && b.captiveCancel != nil {
 			b.captiveCancel()
 			b.captiveCancel = nil
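With the hook registrations and guards in place, both configurations build from the same tree. A usage sketch, assuming tailscale.com/cmd/tailscaled as the target binary:

go build tailscale.com/cmd/tailscaled                              # captive portal detection included
go build -tags ts_omit_captiveportal tailscale.com/cmd/tailscaled  # detection code omitted

The second command excludes the tag-guarded file, so the hooks are never Set; the buildfeatures.HasCaptivePortal guards at the call sites keep the unset hooks from ever being invoked.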