Redo DNS configuration (#2034)

This commit changes and streamlines the dns_config section into a new
key, dns. It removes a combination of outdated and incompatible
configuration options that made it easy to confuse what headscale
could and could not do, or what to expect from one's configuration.

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
Kristoffer Dalby 2024-08-19 11:41:05 +02:00 committed by GitHub
parent 022fb24cd9
commit ac8491efec
25 changed files with 1036 additions and 453 deletions
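
For orientation, a rough sketch of what the consolidated dns key could look like in config.yaml, pieced together from the HEADSCALE_DNS_* environment variables exercised by the tests below; the exact nesting of nameservers.global, nameservers.split, search_domains and extra_records is an assumption, not copied from the shipped example config.

dns:
  magic_dns: true
  base_domain: headscale.net
  nameservers:
    global:
      - 1.1.1.1
      - 8.8.8.8
    split:
      foo.bar.com:
        - 1.1.1.1
  search_domains:
    - test1.no
    - test2.no
  extra_records:
    - name: "prometheus.myvpn.example.com"
      type: "A"
      value: "100.64.0.4"

The old dns_config block that this replaces is still visible in the removed DefaultConfigYAML further down.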

integration/dns_test.go

@@ -0,0 +1,246 @@
package integration
import (
"fmt"
"strings"
"testing"
"time"
"github.com/juanfont/headscale/integration/hsic"
"github.com/juanfont/headscale/integration/tsic"
"github.com/stretchr/testify/assert"
)
func TestResolveMagicDNS(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
scenario, err := NewScenario(dockertestMaxWait())
assertNoErr(t, err)
defer scenario.Shutdown()
spec := map[string]int{
"magicdns1": len(MustTestVersions),
"magicdns2": len(MustTestVersions),
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("magicdns"))
assertNoErrHeadscaleEnv(t, err)
allClients, err := scenario.ListTailscaleClients()
assertNoErrListClients(t, err)
err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
// assertClientsState(t, allClients)
// Poor man's cache
_, err = scenario.ListTailscaleClientsFQDNs()
assertNoErrListFQDN(t, err)
_, err = scenario.ListTailscaleClientsIPs()
assertNoErrListClientIPs(t, err)
for _, client := range allClients {
for _, peer := range allClients {
// It is safe to ignore this error as we handled it when caching it
peerFQDN, _ := peer.FQDN()
assert.Equal(t, fmt.Sprintf("%s.headscale.net", peer.Hostname()), peerFQDN)
command := []string{
"tailscale",
"ip", peerFQDN,
}
result, _, err := client.Execute(command)
if err != nil {
t.Fatalf(
"failed to execute resolve/ip command %s from %s: %s",
peerFQDN,
client.Hostname(),
err,
)
}
ips, err := peer.IPs()
if err != nil {
t.Fatalf(
"failed to get ips for %s: %s",
peer.Hostname(),
err,
)
}
for _, ip := range ips {
if !strings.Contains(result, ip.String()) {
t.Fatalf("ip %s is not found in \n%s\n", ip.String(), result)
}
}
}
}
}
// TestValidateResolvConf validates that the resolv.conf file
// ends up as expected in our Tailscale containers.
// All the containers are based on Alpine, meaning Tailscale
// will overwrite the resolv.conf file.
// On other platforms, Tailscale will integrate with a DNS manager
// if available (like systemd-resolved).
func TestValidateResolvConf(t *testing.T) {
IntegrationSkip(t)
resolvconf := func(conf string) string {
return strings.ReplaceAll(`# resolv.conf(5) file generated by tailscale
# For more info, see https://tailscale.com/s/resolvconf-overwrite
# DO NOT EDIT THIS FILE BY HAND -- CHANGES WILL BE OVERWRITTEN
`+conf, "\t", "")
}
tests := []struct {
name string
conf map[string]string
wantConfCompareFunc func(*testing.T, string)
}{
// New config
{
name: "no-config",
conf: map[string]string{
"HEADSCALE_DNS_BASE_DOMAIN": "",
"HEADSCALE_DNS_MAGIC_DNS": "false",
"HEADSCALE_DNS_NAMESERVERS_GLOBAL": "",
},
wantConfCompareFunc: func(t *testing.T, got string) {
assert.NotContains(t, got, "100.100.100.100")
},
},
{
name: "global-only",
conf: map[string]string{
"HEADSCALE_DNS_BASE_DOMAIN": "",
"HEADSCALE_DNS_MAGIC_DNS": "false",
"HEADSCALE_DNS_NAMESERVERS_GLOBAL": "8.8.8.8 1.1.1.1",
},
wantConfCompareFunc: func(t *testing.T, got string) {
want := resolvconf(`
nameserver 100.100.100.100
`)
assert.Equal(t, want, got)
},
},
{
name: "base-integration-config",
conf: map[string]string{
"HEADSCALE_DNS_BASE_DOMAIN": "very-unique-domain.net",
},
wantConfCompareFunc: func(t *testing.T, got string) {
want := resolvconf(`
nameserver 100.100.100.100
search very-unique-domain.net
`)
assert.Equal(t, want, got)
},
},
{
name: "base-magic-dns-off",
conf: map[string]string{
"HEADSCALE_DNS_MAGIC_DNS": "false",
"HEADSCALE_DNS_BASE_DOMAIN": "very-unique-domain.net",
},
wantConfCompareFunc: func(t *testing.T, got string) {
want := resolvconf(`
nameserver 100.100.100.100
search very-unique-domain.net
`)
assert.Equal(t, want, got)
},
},
{
name: "base-extra-search-domains",
conf: map[string]string{
"HEADSCALE_DNS_SEARCH_DOMAINS": "test1.no test2.no",
"HEADSCALE_DNS_BASE_DOMAIN": "with-local-dns.net",
},
wantConfCompareFunc: func(t *testing.T, got string) {
want := resolvconf(`
nameserver 100.100.100.100
search with-local-dns.net test1.no test2.no
`)
assert.Equal(t, want, got)
},
},
{
name: "base-nameservers-split",
conf: map[string]string{
"HEADSCALE_DNS_NAMESERVERS_SPLIT": `{foo.bar.com: ["1.1.1.1"]}`,
"HEADSCALE_DNS_BASE_DOMAIN": "with-local-dns.net",
},
wantConfCompareFunc: func(t *testing.T, got string) {
want := resolvconf(`
nameserver 100.100.100.100
search with-local-dns.net
`)
assert.Equal(t, want, got)
},
},
{
name: "base-full-no-magic",
conf: map[string]string{
"HEADSCALE_DNS_MAGIC_DNS": "false",
"HEADSCALE_DNS_BASE_DOMAIN": "all-of.it",
"HEADSCALE_DNS_NAMESERVERS_GLOBAL": `8.8.8.8`,
"HEADSCALE_DNS_SEARCH_DOMAINS": "test1.no test2.no",
// TODO(kradalby): this currently isn't working, need to fix it
// "HEADSCALE_DNS_NAMESERVERS_SPLIT": `{foo.bar.com: ["1.1.1.1"]}`,
// "HEADSCALE_DNS_EXTRA_RECORDS": `[{ name: "prometheus.myvpn.example.com", type: "A", value: "100.64.0.4" }]`,
},
wantConfCompareFunc: func(t *testing.T, got string) {
want := resolvconf(`
nameserver 100.100.100.100
search all-of.it test1.no test2.no
`)
assert.Equal(t, want, got)
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
scenario, err := NewScenario(dockertestMaxWait())
assertNoErr(t, err)
defer scenario.Shutdown()
spec := map[string]int{
"resolvconf1": 3,
"resolvconf2": 3,
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("resolvconf"), hsic.WithConfigEnv(tt.conf))
assertNoErrHeadscaleEnv(t, err)
allClients, err := scenario.ListTailscaleClients()
assertNoErrListClients(t, err)
err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
// Poor man's cache
_, err = scenario.ListTailscaleClientsFQDNs()
assertNoErrListFQDN(t, err)
_, err = scenario.ListTailscaleClientsIPs()
assertNoErrListClientIPs(t, err)
time.Sleep(30 * time.Second)
for _, client := range allClients {
b, err := client.ReadFile("/etc/resolv.conf")
assertNoErr(t, err)
t.Logf("comparing resolv conf of %s", client.Hostname())
tt.wantConfCompareFunc(t, string(b))
}
})
}
}


@@ -623,74 +623,6 @@ func TestTaildrop(t *testing.T) {
}
}
func TestResolveMagicDNS(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
scenario, err := NewScenario(dockertestMaxWait())
assertNoErr(t, err)
defer scenario.Shutdown()
spec := map[string]int{
"magicdns1": len(MustTestVersions),
"magicdns2": len(MustTestVersions),
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("magicdns"))
assertNoErrHeadscaleEnv(t, err)
allClients, err := scenario.ListTailscaleClients()
assertNoErrListClients(t, err)
err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
// assertClientsState(t, allClients)
// Poor mans cache
_, err = scenario.ListTailscaleClientsFQDNs()
assertNoErrListFQDN(t, err)
_, err = scenario.ListTailscaleClientsIPs()
assertNoErrListClientIPs(t, err)
for _, client := range allClients {
for _, peer := range allClients {
// It is safe to ignore this error as we handled it when caching it
peerFQDN, _ := peer.FQDN()
command := []string{
"tailscale",
"ip", peerFQDN,
}
result, _, err := client.Execute(command)
if err != nil {
t.Fatalf(
"failed to execute resolve/ip command %s from %s: %s",
peerFQDN,
client.Hostname(),
err,
)
}
ips, err := peer.IPs()
if err != nil {
t.Fatalf(
"failed to get ips for %s: %s",
peer.Hostname(),
err,
)
}
for _, ip := range ips {
if !strings.Contains(result, ip.String()) {
t.Fatalf("ip %s is not found in \n%s\n", ip.String(), result)
}
}
}
}
}
func TestExpireNode(t *testing.T) {
IntegrationSkip(t)
t.Parallel()


@@ -2,104 +2,6 @@ package hsic
import "github.com/juanfont/headscale/hscontrol/types"
// const (
// defaultEphemeralNodeInactivityTimeout = time.Second * 30
// defaultNodeUpdateCheckInterval = time.Second * 10
// )
// TODO(kradalby): This approach doesnt work because we cannot
// serialise our config object to YAML or JSON.
// func DefaultConfig() headscale.Config {
// derpMap, _ := url.Parse("https://controlplane.tailscale.com/derpmap/default")
//
// config := headscale.Config{
// Log: headscale.LogConfig{
// Level: zerolog.TraceLevel,
// },
// ACL: headscale.GetACLConfig(),
// DBtype: "sqlite3",
// EphemeralNodeInactivityTimeout: defaultEphemeralNodeInactivityTimeout,
// NodeUpdateCheckInterval: defaultNodeUpdateCheckInterval,
// IPPrefixes: []netip.Prefix{
// netip.MustParsePrefix("fd7a:115c:a1e0::/48"),
// netip.MustParsePrefix("100.64.0.0/10"),
// },
// DNSConfig: &tailcfg.DNSConfig{
// Proxied: true,
// Nameservers: []netip.Addr{
// netip.MustParseAddr("127.0.0.11"),
// netip.MustParseAddr("1.1.1.1"),
// },
// Resolvers: []*dnstype.Resolver{
// {
// Addr: "127.0.0.11",
// },
// {
// Addr: "1.1.1.1",
// },
// },
// },
// BaseDomain: "headscale.net",
//
// DBpath: "/tmp/integration_test_db.sqlite3",
//
// PrivateKeyPath: "/tmp/integration_private.key",
// NoisePrivateKeyPath: "/tmp/noise_integration_private.key",
// Addr: "0.0.0.0:8080",
// MetricsAddr: "127.0.0.1:9090",
// ServerURL: "http://headscale:8080",
//
// DERP: headscale.DERPConfig{
// URLs: []url.URL{
// *derpMap,
// },
// AutoUpdate: false,
// UpdateFrequency: 1 * time.Minute,
// },
// }
//
// return config
// }
// TODO: Reuse the actual configuration object above.
// Deprecated: use env function instead as it is easier to
// override.
func DefaultConfigYAML() string {
yaml := `
log:
level: trace
acl_policy_path: ""
database:
type: sqlite3
sqlite.path: /tmp/integration_test_db.sqlite3
ephemeral_node_inactivity_timeout: 30m
prefixes:
v6: fd7a:115c:a1e0::/48
v4: 100.64.0.0/10
dns_config:
base_domain: headscale.net
magic_dns: true
domains: []
nameservers:
- 127.0.0.11
- 1.1.1.1
private_key_path: /tmp/private.key
noise:
private_key_path: /tmp/noise_private.key
listen_addr: 0.0.0.0:8080
metrics_listen_addr: 127.0.0.1:9090
server_url: http://headscale:8080
derp:
urls:
- https://controlplane.tailscale.com/derpmap/default
auto_update_enabled: false
update_frequency: 1m
`
return yaml
}
func MinimumConfigYAML() string {
return `
private_key_path: /tmp/private.key
@@ -117,10 +19,9 @@ func DefaultConfigEnv() map[string]string {
"HEADSCALE_EPHEMERAL_NODE_INACTIVITY_TIMEOUT": "30m",
"HEADSCALE_PREFIXES_V4": "100.64.0.0/10",
"HEADSCALE_PREFIXES_V6": "fd7a:115c:a1e0::/48",
"HEADSCALE_DNS_CONFIG_BASE_DOMAIN": "headscale.net",
"HEADSCALE_DNS_CONFIG_MAGIC_DNS": "true",
"HEADSCALE_DNS_CONFIG_DOMAINS": "",
"HEADSCALE_DNS_CONFIG_NAMESERVERS": "127.0.0.11 1.1.1.1",
"HEADSCALE_DNS_BASE_DOMAIN": "headscale.net",
"HEADSCALE_DNS_MAGIC_DNS": "true",
"HEADSCALE_DNS_NAMESERVERS_GLOBAL": "127.0.0.11 1.1.1.1",
"HEADSCALE_PRIVATE_KEY_PATH": "/tmp/private.key",
"HEADSCALE_NOISE_PRIVATE_KEY_PATH": "/tmp/noise_private.key",
"HEADSCALE_LISTEN_ADDR": "0.0.0.0:8080",


@@ -51,6 +51,8 @@ var (
tailscaleVersions2021 = map[string]bool{
"head": true,
"unstable": true,
"1.70": true, // CapVer: not checked
"1.68": true, // CapVer: not checked
"1.66": true, // CapVer: not checked
"1.64": true, // CapVer: not checked
"1.62": true, // CapVer: not checked
@@ -62,10 +64,10 @@ var (
"1.50": true, // CapVer: 74
"1.48": true, // CapVer: 68
"1.46": true, // CapVer: 65
"1.44": true, // CapVer: 63
"1.42": true, // CapVer: 61
"1.40": true, // CapVer: 61
"1.38": true, // Oldest supported version, CapVer: 58
"1.44": false, // CapVer: 63
"1.42": false, // Oldest supported version, CapVer: 61
"1.40": false, // CapVer: 61
"1.38": false, // CapVer: 58
"1.36": false, // CapVer: 56
"1.34": false, // CapVer: 51
"1.32": false, // CapVer: 46


@@ -36,6 +36,7 @@ type TailscaleClient interface {
Ping(hostnameOrIP string, opts ...tsic.PingOption) error
Curl(url string, opts ...tsic.CurlOption) (string, error)
ID() string
ReadFile(path string) ([]byte, error)
// FailingPeersAsString returns a formatted-ish multi-line-string of peers in the client
// and a bool indicating if the clients online count and peer count is equal.


@@ -1,6 +1,8 @@
package tsic
import (
"archive/tar"
"bytes"
"context"
"encoding/json"
"errors"
@@ -998,3 +1000,41 @@ func (t *TailscaleInContainer) WriteFile(path string, data []byte) error {
func (t *TailscaleInContainer) SaveLog(path string) error {
return dockertestutil.SaveLog(t.pool, t.container, path)
}
// ReadFile reads a file from the Tailscale container.
// It returns the content of the file as a byte slice.
func (t *TailscaleInContainer) ReadFile(path string) ([]byte, error) {
tarBytes, err := integrationutil.FetchPathFromContainer(t.pool, t.container, path)
if err != nil {
return nil, fmt.Errorf("reading file from container: %w", err)
}
var out bytes.Buffer
tr := tar.NewReader(bytes.NewReader(tarBytes))
for {
hdr, err := tr.Next()
if err == io.EOF {
break // End of archive
}
if err != nil {
return nil, fmt.Errorf("reading tar header: %w", err)
}
if !strings.Contains(path, hdr.Name) {
return nil, fmt.Errorf("file not found in tar archive, looking for: %s, header was: %s", path, hdr.Name)
}
if _, err := io.Copy(&out, tr); err != nil {
return nil, fmt.Errorf("copying file to buffer: %w", err)
}
// Only support reading the first file
break
}
if out.Len() == 0 {
return nil, fmt.Errorf("file is empty")
}
return out.Bytes(), nil
}
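A minimal usage sketch, mirroring how TestValidateResolvConf above pulls the generated file out of a client container; client and t are assumed to be a TailscaleClient and *testing.T already in scope:

// Read the resolv.conf that Tailscale generated inside the container
// and check that it points at the MagicDNS resolver.
b, err := client.ReadFile("/etc/resolv.conf")
if err != nil {
	t.Fatalf("reading resolv.conf from %s: %s", client.Hostname(), err)
}
if !strings.Contains(string(b), "100.100.100.100") {
	t.Errorf("expected the MagicDNS resolver 100.100.100.100 in resolv.conf, got:\n%s", string(b))
}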