Rework map session

This commit restructures the map session in to a struct
holding the state of what is needed during its lifetime.

For streaming sessions, the event loop is structured a
bit differently not hammering the clients with updates
but rather batching them over a short, configurable time
which should significantly improve cpu usage, and potentially
flakyness.

The use of Patch updates has been dialed back a little as
it does not look like its a 100% ready for prime time. Nodes
are now updated with full changes, except for a few things
like online status.

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
This commit is contained in:
Kristoffer Dalby 2024-02-23 10:59:24 +01:00 committed by Juan Font
parent dd693c444c
commit 58c94d2bd3
35 changed files with 1803 additions and 1716 deletions

View file

@ -83,7 +83,7 @@ func TestOIDCAuthenticationPingAll(t *testing.T) {
err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
assertClientsState(t, allClients)
// assertClientsState(t, allClients)
allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
return x.String()
@ -142,7 +142,7 @@ func TestOIDCExpireNodesBasedOnTokenExpiry(t *testing.T) {
err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
assertClientsState(t, allClients)
// assertClientsState(t, allClients)
allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
return x.String()

View file

@ -53,7 +53,7 @@ func TestAuthWebFlowAuthenticationPingAll(t *testing.T) {
err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
assertClientsState(t, allClients)
// assertClientsState(t, allClients)
allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
return x.String()
@ -92,7 +92,7 @@ func TestAuthWebFlowLogoutAndRelogin(t *testing.T) {
err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
assertClientsState(t, allClients)
// assertClientsState(t, allClients)
allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
return x.String()

View file

@ -65,7 +65,7 @@ func TestPingAllByIP(t *testing.T) {
err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
assertClientsState(t, allClients)
// assertClientsState(t, allClients)
allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
return x.String()
@ -103,7 +103,7 @@ func TestPingAllByIPPublicDERP(t *testing.T) {
err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
assertClientsState(t, allClients)
// assertClientsState(t, allClients)
allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
return x.String()
@ -135,7 +135,7 @@ func TestAuthKeyLogoutAndRelogin(t *testing.T) {
err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
assertClientsState(t, allClients)
// assertClientsState(t, allClients)
clientIPs := make(map[TailscaleClient][]netip.Addr)
for _, client := range allClients {
@ -176,7 +176,7 @@ func TestAuthKeyLogoutAndRelogin(t *testing.T) {
err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
assertClientsState(t, allClients)
// assertClientsState(t, allClients)
allClients, err = scenario.ListTailscaleClients()
assertNoErrListClients(t, err)
@ -329,7 +329,7 @@ func TestPingAllByHostname(t *testing.T) {
err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
assertClientsState(t, allClients)
// assertClientsState(t, allClients)
allHostnames, err := scenario.ListTailscaleClientsFQDNs()
assertNoErrListFQDN(t, err)
@ -539,7 +539,7 @@ func TestResolveMagicDNS(t *testing.T) {
err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
assertClientsState(t, allClients)
// assertClientsState(t, allClients)
// Poor mans cache
_, err = scenario.ListTailscaleClientsFQDNs()
@ -609,7 +609,7 @@ func TestExpireNode(t *testing.T) {
err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
assertClientsState(t, allClients)
// assertClientsState(t, allClients)
allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
return x.String()
@ -711,7 +711,7 @@ func TestExpireNode(t *testing.T) {
}
}
func TestNodeOnlineLastSeenStatus(t *testing.T) {
func TestNodeOnlineStatus(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
@ -723,7 +723,7 @@ func TestNodeOnlineLastSeenStatus(t *testing.T) {
"user1": len(MustTestVersions),
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("onlinelastseen"))
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("online"))
assertNoErrHeadscaleEnv(t, err)
allClients, err := scenario.ListTailscaleClients()
@ -735,7 +735,7 @@ func TestNodeOnlineLastSeenStatus(t *testing.T) {
err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
assertClientsState(t, allClients)
// assertClientsState(t, allClients)
allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
return x.String()
@ -755,8 +755,6 @@ func TestNodeOnlineLastSeenStatus(t *testing.T) {
headscale, err := scenario.Headscale()
assertNoErr(t, err)
keepAliveInterval := 60 * time.Second
// Duration is chosen arbitrarily, 10m is reported in #1561
testDuration := 12 * time.Minute
start := time.Now()
@ -780,11 +778,6 @@ func TestNodeOnlineLastSeenStatus(t *testing.T) {
err = json.Unmarshal([]byte(result), &nodes)
assertNoErr(t, err)
now := time.Now()
// Threshold with some leeway
lastSeenThreshold := now.Add(-keepAliveInterval - (10 * time.Second))
// Verify that headscale reports the nodes as online
for _, node := range nodes {
// All nodes should be online
@ -795,18 +788,6 @@ func TestNodeOnlineLastSeenStatus(t *testing.T) {
node.GetName(),
time.Since(start),
)
lastSeen := node.GetLastSeen().AsTime()
// All nodes should have been last seen between now and the keepAliveInterval
assert.Truef(
t,
lastSeen.After(lastSeenThreshold),
"node (%s) lastSeen (%v) was not %s after the threshold (%v)",
node.GetName(),
lastSeen,
keepAliveInterval,
lastSeenThreshold,
)
}
// Verify that all nodes report all nodes to be online
@ -834,15 +815,6 @@ func TestNodeOnlineLastSeenStatus(t *testing.T) {
client.Hostname(),
time.Since(start),
)
// from docs: last seen to tailcontrol; only present if offline
// assert.Nilf(
// t,
// peerStatus.LastSeen,
// "expected node %s to not have LastSeen set, got %s",
// peerStatus.HostName,
// peerStatus.LastSeen,
// )
}
}
@ -850,3 +822,87 @@ func TestNodeOnlineLastSeenStatus(t *testing.T) {
time.Sleep(time.Second)
}
}
// TestPingAllByIPManyUpDown is a variant of the PingAll
// test which will take the tailscale node up and down
// five times ensuring they are able to restablish connectivity.
func TestPingAllByIPManyUpDown(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
scenario, err := NewScenario()
assertNoErr(t, err)
defer scenario.Shutdown()
// TODO(kradalby): it does not look like the user thing works, only second
// get created? maybe only when many?
spec := map[string]int{
"user1": len(MustTestVersions),
"user2": len(MustTestVersions),
}
headscaleConfig := map[string]string{
"HEADSCALE_DERP_URLS": "",
"HEADSCALE_DERP_SERVER_ENABLED": "true",
"HEADSCALE_DERP_SERVER_REGION_ID": "999",
"HEADSCALE_DERP_SERVER_REGION_CODE": "headscale",
"HEADSCALE_DERP_SERVER_REGION_NAME": "Headscale Embedded DERP",
"HEADSCALE_DERP_SERVER_STUN_LISTEN_ADDR": "0.0.0.0:3478",
"HEADSCALE_DERP_SERVER_PRIVATE_KEY_PATH": "/tmp/derp.key",
// Envknob for enabling DERP debug logs
"DERP_DEBUG_LOGS": "true",
"DERP_PROBER_DEBUG_LOGS": "true",
}
err = scenario.CreateHeadscaleEnv(spec,
[]tsic.Option{},
hsic.WithTestName("pingallbyip"),
hsic.WithConfigEnv(headscaleConfig),
hsic.WithTLS(),
hsic.WithHostnameAsServerURL(),
)
assertNoErrHeadscaleEnv(t, err)
allClients, err := scenario.ListTailscaleClients()
assertNoErrListClients(t, err)
allIps, err := scenario.ListTailscaleClientsIPs()
assertNoErrListClientIPs(t, err)
err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
// assertClientsState(t, allClients)
allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
return x.String()
})
success := pingAllHelper(t, allClients, allAddrs)
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
for run := range 3 {
t.Logf("Starting DownUpPing run %d", run+1)
for _, client := range allClients {
t.Logf("taking down %q", client.Hostname())
client.Down()
}
time.Sleep(5 * time.Second)
for _, client := range allClients {
t.Logf("bringing up %q", client.Hostname())
client.Up()
}
time.Sleep(5 * time.Second)
err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
success := pingAllHelper(t, allClients, allAddrs)
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
}
}

View file

@ -212,7 +212,11 @@ func TestEnablingRoutes(t *testing.T) {
if route.GetId() == routeToBeDisabled.GetId() {
assert.Equal(t, false, route.GetEnabled())
assert.Equal(t, false, route.GetIsPrimary())
// since this is the only route of this cidr,
// it will not failover, and remain Primary
// until something can replace it.
assert.Equal(t, true, route.GetIsPrimary())
} else {
assert.Equal(t, true, route.GetEnabled())
assert.Equal(t, true, route.GetIsPrimary())
@ -291,6 +295,7 @@ func TestHASubnetRouterFailover(t *testing.T) {
client := allClients[2]
t.Logf("Advertise route from r1 (%s) and r2 (%s), making it HA, n1 is primary", subRouter1.Hostname(), subRouter2.Hostname())
// advertise HA route on node 1 and 2
// ID 1 will be primary
// ID 2 will be secondary
@ -384,12 +389,12 @@ func TestHASubnetRouterFailover(t *testing.T) {
// Node 1 is primary
assert.Equal(t, true, enablingRoutes[0].GetAdvertised())
assert.Equal(t, true, enablingRoutes[0].GetEnabled())
assert.Equal(t, true, enablingRoutes[0].GetIsPrimary())
assert.Equal(t, true, enablingRoutes[0].GetIsPrimary(), "both subnet routers are up, expected r1 to be primary")
// Node 2 is not primary
assert.Equal(t, true, enablingRoutes[1].GetAdvertised())
assert.Equal(t, true, enablingRoutes[1].GetEnabled())
assert.Equal(t, false, enablingRoutes[1].GetIsPrimary())
assert.Equal(t, false, enablingRoutes[1].GetIsPrimary(), "both subnet routers are up, expected r2 to be non-primary")
// Verify that the client has routes from the primary machine
srs1, err := subRouter1.Status()
@ -401,6 +406,9 @@ func TestHASubnetRouterFailover(t *testing.T) {
srs1PeerStatus := clientStatus.Peer[srs1.Self.PublicKey]
srs2PeerStatus := clientStatus.Peer[srs2.Self.PublicKey]
assert.True(t, srs1PeerStatus.Online, "r1 up, r2 up")
assert.True(t, srs2PeerStatus.Online, "r1 up, r2 up")
assertNotNil(t, srs1PeerStatus.PrimaryRoutes)
assert.Nil(t, srs2PeerStatus.PrimaryRoutes)
@ -411,7 +419,8 @@ func TestHASubnetRouterFailover(t *testing.T) {
)
// Take down the current primary
t.Logf("taking down subnet router 1 (%s)", subRouter1.Hostname())
t.Logf("taking down subnet router r1 (%s)", subRouter1.Hostname())
t.Logf("expecting r2 (%s) to take over as primary", subRouter2.Hostname())
err = subRouter1.Down()
assertNoErr(t, err)
@ -435,15 +444,12 @@ func TestHASubnetRouterFailover(t *testing.T) {
// Node 1 is not primary
assert.Equal(t, true, routesAfterMove[0].GetAdvertised())
assert.Equal(t, true, routesAfterMove[0].GetEnabled())
assert.Equal(t, false, routesAfterMove[0].GetIsPrimary())
assert.Equal(t, false, routesAfterMove[0].GetIsPrimary(), "r1 is down, expected r2 to be primary")
// Node 2 is primary
assert.Equal(t, true, routesAfterMove[1].GetAdvertised())
assert.Equal(t, true, routesAfterMove[1].GetEnabled())
assert.Equal(t, true, routesAfterMove[1].GetIsPrimary())
// TODO(kradalby): Check client status
// Route is expected to be on SR2
assert.Equal(t, true, routesAfterMove[1].GetIsPrimary(), "r1 is down, expected r2 to be primary")
srs2, err = subRouter2.Status()
@ -453,6 +459,9 @@ func TestHASubnetRouterFailover(t *testing.T) {
srs1PeerStatus = clientStatus.Peer[srs1.Self.PublicKey]
srs2PeerStatus = clientStatus.Peer[srs2.Self.PublicKey]
assert.False(t, srs1PeerStatus.Online, "r1 down, r2 down")
assert.True(t, srs2PeerStatus.Online, "r1 down, r2 up")
assert.Nil(t, srs1PeerStatus.PrimaryRoutes)
assertNotNil(t, srs2PeerStatus.PrimaryRoutes)
@ -465,7 +474,8 @@ func TestHASubnetRouterFailover(t *testing.T) {
}
// Take down subnet router 2, leaving none available
t.Logf("taking down subnet router 2 (%s)", subRouter2.Hostname())
t.Logf("taking down subnet router r2 (%s)", subRouter2.Hostname())
t.Logf("expecting r2 (%s) to remain primary, no other available", subRouter2.Hostname())
err = subRouter2.Down()
assertNoErr(t, err)
@ -489,14 +499,14 @@ func TestHASubnetRouterFailover(t *testing.T) {
// Node 1 is not primary
assert.Equal(t, true, routesAfterBothDown[0].GetAdvertised())
assert.Equal(t, true, routesAfterBothDown[0].GetEnabled())
assert.Equal(t, false, routesAfterBothDown[0].GetIsPrimary())
assert.Equal(t, false, routesAfterBothDown[0].GetIsPrimary(), "r1 and r2 is down, expected r2 to _still_ be primary")
// Node 2 is primary
// if the node goes down, but no other suitable route is
// available, keep the last known good route.
assert.Equal(t, true, routesAfterBothDown[1].GetAdvertised())
assert.Equal(t, true, routesAfterBothDown[1].GetEnabled())
assert.Equal(t, true, routesAfterBothDown[1].GetIsPrimary())
assert.Equal(t, true, routesAfterBothDown[1].GetIsPrimary(), "r1 and r2 is down, expected r2 to _still_ be primary")
// TODO(kradalby): Check client status
// Both are expected to be down
@ -508,6 +518,9 @@ func TestHASubnetRouterFailover(t *testing.T) {
srs1PeerStatus = clientStatus.Peer[srs1.Self.PublicKey]
srs2PeerStatus = clientStatus.Peer[srs2.Self.PublicKey]
assert.False(t, srs1PeerStatus.Online, "r1 down, r2 down")
assert.False(t, srs2PeerStatus.Online, "r1 down, r2 down")
assert.Nil(t, srs1PeerStatus.PrimaryRoutes)
assertNotNil(t, srs2PeerStatus.PrimaryRoutes)
@ -520,7 +533,8 @@ func TestHASubnetRouterFailover(t *testing.T) {
}
// Bring up subnet router 1, making the route available from there.
t.Logf("bringing up subnet router 1 (%s)", subRouter1.Hostname())
t.Logf("bringing up subnet router r1 (%s)", subRouter1.Hostname())
t.Logf("expecting r1 (%s) to take over as primary (only one online)", subRouter1.Hostname())
err = subRouter1.Up()
assertNoErr(t, err)
@ -544,12 +558,12 @@ func TestHASubnetRouterFailover(t *testing.T) {
// Node 1 is primary
assert.Equal(t, true, routesAfter1Up[0].GetAdvertised())
assert.Equal(t, true, routesAfter1Up[0].GetEnabled())
assert.Equal(t, true, routesAfter1Up[0].GetIsPrimary())
assert.Equal(t, true, routesAfter1Up[0].GetIsPrimary(), "r1 is back up, expected r1 to become be primary")
// Node 2 is not primary
assert.Equal(t, true, routesAfter1Up[1].GetAdvertised())
assert.Equal(t, true, routesAfter1Up[1].GetEnabled())
assert.Equal(t, false, routesAfter1Up[1].GetIsPrimary())
assert.Equal(t, false, routesAfter1Up[1].GetIsPrimary(), "r1 is back up, expected r1 to become be primary")
// Verify that the route is announced from subnet router 1
clientStatus, err = client.Status()
@ -558,6 +572,9 @@ func TestHASubnetRouterFailover(t *testing.T) {
srs1PeerStatus = clientStatus.Peer[srs1.Self.PublicKey]
srs2PeerStatus = clientStatus.Peer[srs2.Self.PublicKey]
assert.True(t, srs1PeerStatus.Online, "r1 is back up, r2 down")
assert.False(t, srs2PeerStatus.Online, "r1 is back up, r2 down")
assert.NotNil(t, srs1PeerStatus.PrimaryRoutes)
assert.Nil(t, srs2PeerStatus.PrimaryRoutes)
@ -570,7 +587,8 @@ func TestHASubnetRouterFailover(t *testing.T) {
}
// Bring up subnet router 2, should result in no change.
t.Logf("bringing up subnet router 2 (%s)", subRouter2.Hostname())
t.Logf("bringing up subnet router r2 (%s)", subRouter2.Hostname())
t.Logf("both online, expecting r1 (%s) to still be primary (no flapping)", subRouter1.Hostname())
err = subRouter2.Up()
assertNoErr(t, err)
@ -594,12 +612,12 @@ func TestHASubnetRouterFailover(t *testing.T) {
// Node 1 is not primary
assert.Equal(t, true, routesAfter2Up[0].GetAdvertised())
assert.Equal(t, true, routesAfter2Up[0].GetEnabled())
assert.Equal(t, true, routesAfter2Up[0].GetIsPrimary())
assert.Equal(t, true, routesAfter2Up[0].GetIsPrimary(), "r1 and r2 is back up, expected r1 to _still_ be primary")
// Node 2 is primary
assert.Equal(t, true, routesAfter2Up[1].GetAdvertised())
assert.Equal(t, true, routesAfter2Up[1].GetEnabled())
assert.Equal(t, false, routesAfter2Up[1].GetIsPrimary())
assert.Equal(t, false, routesAfter2Up[1].GetIsPrimary(), "r1 and r2 is back up, expected r1 to _still_ be primary")
// Verify that the route is announced from subnet router 1
clientStatus, err = client.Status()
@ -608,6 +626,9 @@ func TestHASubnetRouterFailover(t *testing.T) {
srs1PeerStatus = clientStatus.Peer[srs1.Self.PublicKey]
srs2PeerStatus = clientStatus.Peer[srs2.Self.PublicKey]
assert.True(t, srs1PeerStatus.Online, "r1 up, r2 up")
assert.True(t, srs2PeerStatus.Online, "r1 up, r2 up")
assert.NotNil(t, srs1PeerStatus.PrimaryRoutes)
assert.Nil(t, srs2PeerStatus.PrimaryRoutes)
@ -620,7 +641,8 @@ func TestHASubnetRouterFailover(t *testing.T) {
}
// Disable the route of subnet router 1, making it failover to 2
t.Logf("disabling route in subnet router 1 (%s)", subRouter1.Hostname())
t.Logf("disabling route in subnet router r1 (%s)", subRouter1.Hostname())
t.Logf("expecting route to failover to r2 (%s), which is still available", subRouter2.Hostname())
_, err = headscale.Execute(
[]string{
"headscale",
@ -648,7 +670,7 @@ func TestHASubnetRouterFailover(t *testing.T) {
assertNoErr(t, err)
assert.Len(t, routesAfterDisabling1, 2)
t.Logf("routes after disabling1 %#v", routesAfterDisabling1)
t.Logf("routes after disabling r1 %#v", routesAfterDisabling1)
// Node 1 is not primary
assert.Equal(t, true, routesAfterDisabling1[0].GetAdvertised())
@ -680,6 +702,7 @@ func TestHASubnetRouterFailover(t *testing.T) {
// enable the route of subnet router 1, no change expected
t.Logf("enabling route in subnet router 1 (%s)", subRouter1.Hostname())
t.Logf("both online, expecting r2 (%s) to still be primary (no flapping)", subRouter2.Hostname())
_, err = headscale.Execute(
[]string{
"headscale",
@ -736,7 +759,8 @@ func TestHASubnetRouterFailover(t *testing.T) {
}
// delete the route of subnet router 2, failover to one expected
t.Logf("deleting route in subnet router 2 (%s)", subRouter2.Hostname())
t.Logf("deleting route in subnet router r2 (%s)", subRouter2.Hostname())
t.Logf("expecting route to failover to r1 (%s)", subRouter1.Hostname())
_, err = headscale.Execute(
[]string{
"headscale",
@ -764,7 +788,7 @@ func TestHASubnetRouterFailover(t *testing.T) {
assertNoErr(t, err)
assert.Len(t, routesAfterDeleting2, 1)
t.Logf("routes after deleting2 %#v", routesAfterDeleting2)
t.Logf("routes after deleting r2 %#v", routesAfterDeleting2)
// Node 1 is primary
assert.Equal(t, true, routesAfterDeleting2[0].GetAdvertised())

View file

@ -50,6 +50,8 @@ var (
tailscaleVersions2021 = map[string]bool{
"head": true,
"unstable": true,
"1.60": true, // CapVer: 82
"1.58": true, // CapVer: 82
"1.56": true, // CapVer: 82
"1.54": true, // CapVer: 79
"1.52": true, // CapVer: 79

View file

@ -27,7 +27,7 @@ type TailscaleClient interface {
Down() error
IPs() ([]netip.Addr, error)
FQDN() (string, error)
Status() (*ipnstate.Status, error)
Status(...bool) (*ipnstate.Status, error)
Netmap() (*netmap.NetworkMap, error)
Netcheck() (*netcheck.Report, error)
WaitForNeedsLogin() error

View file

@ -9,6 +9,7 @@ import (
"log"
"net/netip"
"net/url"
"os"
"strconv"
"strings"
"time"
@ -503,7 +504,7 @@ func (t *TailscaleInContainer) IPs() ([]netip.Addr, error) {
}
// Status returns the ipnstate.Status of the Tailscale instance.
func (t *TailscaleInContainer) Status() (*ipnstate.Status, error) {
func (t *TailscaleInContainer) Status(save ...bool) (*ipnstate.Status, error) {
command := []string{
"tailscale",
"status",
@ -521,60 +522,70 @@ func (t *TailscaleInContainer) Status() (*ipnstate.Status, error) {
return nil, fmt.Errorf("failed to unmarshal tailscale status: %w", err)
}
err = os.WriteFile(fmt.Sprintf("/tmp/control/%s_status.json", t.hostname), []byte(result), 0o755)
if err != nil {
return nil, fmt.Errorf("status netmap to /tmp/control: %w", err)
}
return &status, err
}
// Netmap returns the current Netmap (netmap.NetworkMap) of the Tailscale instance.
// Only works with Tailscale 1.56 and newer.
// Panics if version is lower then minimum.
// func (t *TailscaleInContainer) Netmap() (*netmap.NetworkMap, error) {
// if !util.TailscaleVersionNewerOrEqual("1.56", t.version) {
// panic(fmt.Sprintf("tsic.Netmap() called with unsupported version: %s", t.version))
// }
func (t *TailscaleInContainer) Netmap() (*netmap.NetworkMap, error) {
if !util.TailscaleVersionNewerOrEqual("1.56", t.version) {
panic(fmt.Sprintf("tsic.Netmap() called with unsupported version: %s", t.version))
}
// command := []string{
// "tailscale",
// "debug",
// "netmap",
// }
command := []string{
"tailscale",
"debug",
"netmap",
}
// result, stderr, err := t.Execute(command)
// if err != nil {
// fmt.Printf("stderr: %s\n", stderr)
// return nil, fmt.Errorf("failed to execute tailscale debug netmap command: %w", err)
// }
result, stderr, err := t.Execute(command)
if err != nil {
fmt.Printf("stderr: %s\n", stderr)
return nil, fmt.Errorf("failed to execute tailscale debug netmap command: %w", err)
}
// var nm netmap.NetworkMap
// err = json.Unmarshal([]byte(result), &nm)
// if err != nil {
// return nil, fmt.Errorf("failed to unmarshal tailscale netmap: %w", err)
// }
var nm netmap.NetworkMap
err = json.Unmarshal([]byte(result), &nm)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal tailscale netmap: %w", err)
}
// return &nm, err
// }
err = os.WriteFile(fmt.Sprintf("/tmp/control/%s_netmap.json", t.hostname), []byte(result), 0o755)
if err != nil {
return nil, fmt.Errorf("saving netmap to /tmp/control: %w", err)
}
return &nm, err
}
// Netmap returns the current Netmap (netmap.NetworkMap) of the Tailscale instance.
// This implementation is based on getting the netmap from `tailscale debug watch-ipn`
// as there seem to be some weirdness omitting endpoint and DERP info if we use
// Patch updates.
// This implementation works on all supported versions.
func (t *TailscaleInContainer) Netmap() (*netmap.NetworkMap, error) {
// watch-ipn will only give an update if something is happening,
// since we send keep alives, the worst case for this should be
// 1 minute, but set a slightly more conservative time.
ctx, _ := context.WithTimeout(context.Background(), 3*time.Minute)
// func (t *TailscaleInContainer) Netmap() (*netmap.NetworkMap, error) {
// // watch-ipn will only give an update if something is happening,
// // since we send keep alives, the worst case for this should be
// // 1 minute, but set a slightly more conservative time.
// ctx, _ := context.WithTimeout(context.Background(), 3*time.Minute)
notify, err := t.watchIPN(ctx)
if err != nil {
return nil, err
}
// notify, err := t.watchIPN(ctx)
// if err != nil {
// return nil, err
// }
if notify.NetMap == nil {
return nil, fmt.Errorf("no netmap present in ipn.Notify")
}
// if notify.NetMap == nil {
// return nil, fmt.Errorf("no netmap present in ipn.Notify")
// }
return notify.NetMap, nil
}
// return notify.NetMap, nil
// }
// watchIPN watches `tailscale debug watch-ipn` for a ipn.Notify object until
// it gets one that has a netmap.NetworkMap.

View file

@ -7,6 +7,7 @@ import (
"testing"
"time"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/juanfont/headscale/integration/tsic"
"github.com/stretchr/testify/assert"
)
@ -154,11 +155,11 @@ func assertClientsState(t *testing.T, clients []TailscaleClient) {
func assertValidNetmap(t *testing.T, client TailscaleClient) {
t.Helper()
// if !util.TailscaleVersionNewerOrEqual("1.56", client.Version()) {
// t.Logf("%q has version %q, skipping netmap check...", client.Hostname(), client.Version())
if !util.TailscaleVersionNewerOrEqual("1.56", client.Version()) {
t.Logf("%q has version %q, skipping netmap check...", client.Hostname(), client.Version())
// return
// }
return
}
t.Logf("Checking netmap of %q", client.Hostname())
@ -175,7 +176,11 @@ func assertValidNetmap(t *testing.T, client TailscaleClient) {
assert.NotEmptyf(t, netmap.SelfNode.AllowedIPs(), "%q does not have any allowed IPs", client.Hostname())
assert.NotEmptyf(t, netmap.SelfNode.Addresses(), "%q does not have any addresses", client.Hostname())
assert.Truef(t, *netmap.SelfNode.Online(), "%q is not online", client.Hostname())
if netmap.SelfNode.Online() != nil {
assert.Truef(t, *netmap.SelfNode.Online(), "%q is not online", client.Hostname())
} else {
t.Errorf("Online should not be nil for %s", client.Hostname())
}
assert.Falsef(t, netmap.SelfNode.Key().IsZero(), "%q does not have a valid NodeKey", client.Hostname())
assert.Falsef(t, netmap.SelfNode.Machine().IsZero(), "%q does not have a valid MachineKey", client.Hostname())
@ -213,7 +218,7 @@ func assertValidNetmap(t *testing.T, client TailscaleClient) {
// This test is not suitable for ACL/partial connection tests.
func assertValidStatus(t *testing.T, client TailscaleClient) {
t.Helper()
status, err := client.Status()
status, err := client.Status(true)
if err != nil {
t.Fatalf("getting status for %q: %s", client.Hostname(), err)
}