Compare commits
No commits in common. "b8044c29ddc59d9c6346337d589b73a7e5b0511e" and "b50e10a1be94898e3f237fee35e9f57eea06eda8" have entirely different histories.
b8044c29dd
...
b50e10a1be
43 changed files with 5002 additions and 1838 deletions
14
.github/workflows/build.yml
vendored
14
.github/workflows/build.yml
vendored
|
@ -31,13 +31,10 @@ jobs:
|
||||||
- '**/*.go'
|
- '**/*.go'
|
||||||
- 'integration_test/'
|
- 'integration_test/'
|
||||||
- 'config-example.yaml'
|
- 'config-example.yaml'
|
||||||
- uses: nixbuild/nix-quick-install-action@master
|
- uses: DeterminateSystems/nix-installer-action@main
|
||||||
if: steps.changed-files.outputs.files == 'true'
|
if: steps.changed-files.outputs.files == 'true'
|
||||||
- uses: nix-community/cache-nix-action@main
|
- uses: DeterminateSystems/magic-nix-cache-action@main
|
||||||
if: steps.changed-files.outputs.files == 'true'
|
if: steps.changed-files.outputs.files == 'true'
|
||||||
with:
|
|
||||||
primary-key: nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix', '**/flake.lock') }}
|
|
||||||
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
|
|
||||||
|
|
||||||
- name: Run nix build
|
- name: Run nix build
|
||||||
id: build
|
id: build
|
||||||
|
@ -87,11 +84,8 @@ jobs:
|
||||||
- "GOARCH=amd64 GOOS=darwin"
|
- "GOARCH=amd64 GOOS=darwin"
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
- uses: nixbuild/nix-quick-install-action@master
|
- uses: DeterminateSystems/nix-installer-action@main
|
||||||
- uses: nix-community/cache-nix-action@main
|
- uses: DeterminateSystems/magic-nix-cache-action@main
|
||||||
with:
|
|
||||||
primary-key: nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix', '**/flake.lock') }}
|
|
||||||
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
|
|
||||||
|
|
||||||
- name: Run go cross compile
|
- name: Run go cross compile
|
||||||
run: env ${{ matrix.env }} nix develop --command -- go build -o "headscale" ./cmd/headscale
|
run: env ${{ matrix.env }} nix develop --command -- go build -o "headscale" ./cmd/headscale
|
||||||
|
|
7
.github/workflows/check-tests.yaml
vendored
7
.github/workflows/check-tests.yaml
vendored
|
@ -24,13 +24,10 @@ jobs:
|
||||||
- '**/*.go'
|
- '**/*.go'
|
||||||
- 'integration_test/'
|
- 'integration_test/'
|
||||||
- 'config-example.yaml'
|
- 'config-example.yaml'
|
||||||
- uses: nixbuild/nix-quick-install-action@master
|
- uses: DeterminateSystems/nix-installer-action@main
|
||||||
if: steps.changed-files.outputs.files == 'true'
|
if: steps.changed-files.outputs.files == 'true'
|
||||||
- uses: nix-community/cache-nix-action@main
|
- uses: DeterminateSystems/magic-nix-cache-action@main
|
||||||
if: steps.changed-files.outputs.files == 'true'
|
if: steps.changed-files.outputs.files == 'true'
|
||||||
with:
|
|
||||||
primary-key: nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix', '**/flake.lock') }}
|
|
||||||
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
|
|
||||||
|
|
||||||
- name: Generate and check integration tests
|
- name: Generate and check integration tests
|
||||||
if: steps.changed-files.outputs.files == 'true'
|
if: steps.changed-files.outputs.files == 'true'
|
||||||
|
|
21
.github/workflows/lint.yml
vendored
21
.github/workflows/lint.yml
vendored
|
@ -24,13 +24,10 @@ jobs:
|
||||||
- '**/*.go'
|
- '**/*.go'
|
||||||
- 'integration_test/'
|
- 'integration_test/'
|
||||||
- 'config-example.yaml'
|
- 'config-example.yaml'
|
||||||
- uses: nixbuild/nix-quick-install-action@master
|
- uses: DeterminateSystems/nix-installer-action@main
|
||||||
if: steps.changed-files.outputs.files == 'true'
|
if: steps.changed-files.outputs.files == 'true'
|
||||||
- uses: nix-community/cache-nix-action@main
|
- uses: DeterminateSystems/magic-nix-cache-action@main
|
||||||
if: steps.changed-files.outputs.files == 'true'
|
if: steps.changed-files.outputs.files == 'true'
|
||||||
with:
|
|
||||||
primary-key: nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix', '**/flake.lock') }}
|
|
||||||
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
|
|
||||||
|
|
||||||
- name: golangci-lint
|
- name: golangci-lint
|
||||||
if: steps.changed-files.outputs.files == 'true'
|
if: steps.changed-files.outputs.files == 'true'
|
||||||
|
@ -58,13 +55,10 @@ jobs:
|
||||||
- '**/*.css'
|
- '**/*.css'
|
||||||
- '**/*.scss'
|
- '**/*.scss'
|
||||||
- '**/*.html'
|
- '**/*.html'
|
||||||
- uses: nixbuild/nix-quick-install-action@master
|
- uses: DeterminateSystems/nix-installer-action@main
|
||||||
if: steps.changed-files.outputs.files == 'true'
|
if: steps.changed-files.outputs.files == 'true'
|
||||||
- uses: nix-community/cache-nix-action@main
|
- uses: DeterminateSystems/magic-nix-cache-action@main
|
||||||
if: steps.changed-files.outputs.files == 'true'
|
if: steps.changed-files.outputs.files == 'true'
|
||||||
with:
|
|
||||||
primary-key: nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix', '**/flake.lock') }}
|
|
||||||
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
|
|
||||||
|
|
||||||
- name: Prettify code
|
- name: Prettify code
|
||||||
if: steps.changed-files.outputs.files == 'true'
|
if: steps.changed-files.outputs.files == 'true'
|
||||||
|
@ -74,11 +68,8 @@ jobs:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
- uses: nixbuild/nix-quick-install-action@master
|
- uses: DeterminateSystems/nix-installer-action@main
|
||||||
- uses: nix-community/cache-nix-action@main
|
- uses: DeterminateSystems/magic-nix-cache-action@main
|
||||||
with:
|
|
||||||
primary-key: nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix', '**/flake.lock') }}
|
|
||||||
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
|
|
||||||
|
|
||||||
- name: Buf lint
|
- name: Buf lint
|
||||||
run: nix develop --command -- buf lint proto
|
run: nix develop --command -- buf lint proto
|
||||||
|
|
7
.github/workflows/release.yml
vendored
7
.github/workflows/release.yml
vendored
|
@ -30,11 +30,8 @@ jobs:
|
||||||
username: ${{ github.repository_owner }}
|
username: ${{ github.repository_owner }}
|
||||||
password: ${{ secrets.GITHUB_TOKEN }}
|
password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
- uses: nixbuild/nix-quick-install-action@master
|
- uses: DeterminateSystems/nix-installer-action@main
|
||||||
- uses: nix-community/cache-nix-action@main
|
- uses: DeterminateSystems/magic-nix-cache-action@main
|
||||||
with:
|
|
||||||
primary-key: nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix', '**/flake.lock') }}
|
|
||||||
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
|
|
||||||
|
|
||||||
- name: Run goreleaser
|
- name: Run goreleaser
|
||||||
run: nix develop --command -- goreleaser release --clean
|
run: nix develop --command -- goreleaser release --clean
|
||||||
|
|
9
.github/workflows/test-integration.yaml
vendored
9
.github/workflows/test-integration.yaml
vendored
|
@ -22,8 +22,6 @@ jobs:
|
||||||
- TestACLNamedHostsCanReach
|
- TestACLNamedHostsCanReach
|
||||||
- TestACLDevice1CanAccessDevice2
|
- TestACLDevice1CanAccessDevice2
|
||||||
- TestPolicyUpdateWhileRunningWithCLIInDatabase
|
- TestPolicyUpdateWhileRunningWithCLIInDatabase
|
||||||
- TestACLAutogroupMember
|
|
||||||
- TestACLAutogroupTagged
|
|
||||||
- TestAuthKeyLogoutAndReloginSameUser
|
- TestAuthKeyLogoutAndReloginSameUser
|
||||||
- TestAuthKeyLogoutAndReloginNewUser
|
- TestAuthKeyLogoutAndReloginNewUser
|
||||||
- TestAuthKeyLogoutAndReloginSameUserExpiredKey
|
- TestAuthKeyLogoutAndReloginSameUserExpiredKey
|
||||||
|
@ -116,13 +114,10 @@ jobs:
|
||||||
- name: Setup SSH server for Actor
|
- name: Setup SSH server for Actor
|
||||||
if: ${{ env.HAS_TAILSCALE_SECRET }}
|
if: ${{ env.HAS_TAILSCALE_SECRET }}
|
||||||
uses: alexellis/setup-sshd-actor@master
|
uses: alexellis/setup-sshd-actor@master
|
||||||
- uses: nixbuild/nix-quick-install-action@master
|
- uses: DeterminateSystems/nix-installer-action@main
|
||||||
if: steps.changed-files.outputs.files == 'true'
|
if: steps.changed-files.outputs.files == 'true'
|
||||||
- uses: nix-community/cache-nix-action@main
|
- uses: DeterminateSystems/magic-nix-cache-action@main
|
||||||
if: steps.changed-files.outputs.files == 'true'
|
if: steps.changed-files.outputs.files == 'true'
|
||||||
with:
|
|
||||||
primary-key: nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix', '**/flake.lock') }}
|
|
||||||
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
|
|
||||||
- uses: satackey/action-docker-layer-caching@main
|
- uses: satackey/action-docker-layer-caching@main
|
||||||
if: steps.changed-files.outputs.files == 'true'
|
if: steps.changed-files.outputs.files == 'true'
|
||||||
continue-on-error: true
|
continue-on-error: true
|
||||||
|
|
7
.github/workflows/test.yml
vendored
7
.github/workflows/test.yml
vendored
|
@ -27,13 +27,10 @@ jobs:
|
||||||
- 'integration_test/'
|
- 'integration_test/'
|
||||||
- 'config-example.yaml'
|
- 'config-example.yaml'
|
||||||
|
|
||||||
- uses: nixbuild/nix-quick-install-action@master
|
- uses: DeterminateSystems/nix-installer-action@main
|
||||||
if: steps.changed-files.outputs.files == 'true'
|
if: steps.changed-files.outputs.files == 'true'
|
||||||
- uses: nix-community/cache-nix-action@main
|
- uses: DeterminateSystems/magic-nix-cache-action@main
|
||||||
if: steps.changed-files.outputs.files == 'true'
|
if: steps.changed-files.outputs.files == 'true'
|
||||||
with:
|
|
||||||
primary-key: nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix', '**/flake.lock') }}
|
|
||||||
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
|
|
||||||
|
|
||||||
- name: Run tests
|
- name: Run tests
|
||||||
if: steps.changed-files.outputs.files == 'true'
|
if: steps.changed-files.outputs.files == 'true'
|
||||||
|
|
|
@ -64,15 +64,8 @@ nfpms:
|
||||||
vendor: headscale
|
vendor: headscale
|
||||||
maintainer: Kristoffer Dalby <kristoffer@dalby.cc>
|
maintainer: Kristoffer Dalby <kristoffer@dalby.cc>
|
||||||
homepage: https://github.com/juanfont/headscale
|
homepage: https://github.com/juanfont/headscale
|
||||||
description: |-
|
license: BSD
|
||||||
Open source implementation of the Tailscale control server.
|
|
||||||
Headscale aims to implement a self-hosted, open source alternative to the
|
|
||||||
Tailscale control server. Headscale's goal is to provide self-hosters and
|
|
||||||
hobbyists with an open-source server they can use for their projects and
|
|
||||||
labs. It implements a narrow scope, a single Tailscale network (tailnet),
|
|
||||||
suitable for a personal use, or a small open-source organisation.
|
|
||||||
bindir: /usr/bin
|
bindir: /usr/bin
|
||||||
section: net
|
|
||||||
formats:
|
formats:
|
||||||
- deb
|
- deb
|
||||||
contents:
|
contents:
|
||||||
|
@ -81,21 +74,15 @@ nfpms:
|
||||||
type: config|noreplace
|
type: config|noreplace
|
||||||
file_info:
|
file_info:
|
||||||
mode: 0644
|
mode: 0644
|
||||||
- src: ./packaging/systemd/headscale.service
|
- src: ./docs/packaging/headscale.systemd.service
|
||||||
dst: /usr/lib/systemd/system/headscale.service
|
dst: /usr/lib/systemd/system/headscale.service
|
||||||
- dst: /var/lib/headscale
|
- dst: /var/lib/headscale
|
||||||
type: dir
|
type: dir
|
||||||
- src: LICENSE
|
- dst: /var/run/headscale
|
||||||
dst: /usr/share/doc/headscale/copyright
|
type: dir
|
||||||
scripts:
|
scripts:
|
||||||
postinstall: ./packaging/deb/postinst
|
postinstall: ./docs/packaging/postinstall.sh
|
||||||
postremove: ./packaging/deb/postrm
|
postremove: ./docs/packaging/postremove.sh
|
||||||
preremove: ./packaging/deb/prerm
|
|
||||||
deb:
|
|
||||||
lintian_overrides:
|
|
||||||
- no-changelog # Our CHANGELOG.md uses a different formatting
|
|
||||||
- no-manual-page
|
|
||||||
- statically-linked-binary
|
|
||||||
|
|
||||||
kos:
|
kos:
|
||||||
- id: ghcr
|
- id: ghcr
|
||||||
|
|
|
@ -7,13 +7,6 @@
|
||||||
- Policy: Zero or empty destination port is no longer allowed
|
- Policy: Zero or empty destination port is no longer allowed
|
||||||
[#2606](https://github.com/juanfont/headscale/pull/2606)
|
[#2606](https://github.com/juanfont/headscale/pull/2606)
|
||||||
|
|
||||||
### Changes
|
|
||||||
|
|
||||||
- Remove policy v1 code
|
|
||||||
[#2600](https://github.com/juanfont/headscale/pull/2600)
|
|
||||||
- Refactor Debian/Ubuntu packaging and drop support for Ubuntu 20.04.
|
|
||||||
[#2614](https://github.com/juanfont/headscale/pull/2614)
|
|
||||||
|
|
||||||
## 0.26.0 (2025-05-14)
|
## 0.26.0 (2025-05-14)
|
||||||
|
|
||||||
### BREAKING
|
### BREAKING
|
||||||
|
@ -162,8 +155,6 @@ working in v1 and not tested might be broken in v2 (and vice versa).
|
||||||
[#2438](https://github.com/juanfont/headscale/pull/2438)
|
[#2438](https://github.com/juanfont/headscale/pull/2438)
|
||||||
- Add documentation for routes
|
- Add documentation for routes
|
||||||
[#2496](https://github.com/juanfont/headscale/pull/2496)
|
[#2496](https://github.com/juanfont/headscale/pull/2496)
|
||||||
- Add support for `autogroup:member`, `autogroup:tagged`
|
|
||||||
[#2572](https://github.com/juanfont/headscale/pull/2572)
|
|
||||||
|
|
||||||
## 0.25.1 (2025-02-25)
|
## 0.25.1 (2025-02-25)
|
||||||
|
|
||||||
|
|
|
@ -23,7 +23,7 @@ provides on overview of Headscale's feature and compatibility with the Tailscale
|
||||||
- [x] Access control lists ([GitHub label "policy"](https://github.com/juanfont/headscale/labels/policy%20%F0%9F%93%9D))
|
- [x] Access control lists ([GitHub label "policy"](https://github.com/juanfont/headscale/labels/policy%20%F0%9F%93%9D))
|
||||||
- [x] ACL management via API
|
- [x] ACL management via API
|
||||||
- [x] Some [Autogroups](https://tailscale.com/kb/1396/targets#autogroups), currently: `autogroup:internet`,
|
- [x] Some [Autogroups](https://tailscale.com/kb/1396/targets#autogroups), currently: `autogroup:internet`,
|
||||||
`autogroup:nonroot`, `autogroup:member`, `autogroup:tagged`
|
`autogroup:nonroot`
|
||||||
- [x] [Auto approvers](https://tailscale.com/kb/1337/acl-syntax#auto-approvers) for [subnet
|
- [x] [Auto approvers](https://tailscale.com/kb/1337/acl-syntax#auto-approvers) for [subnet
|
||||||
routers](../ref/routes.md#automatically-approve-routes-of-a-subnet-router) and [exit
|
routers](../ref/routes.md#automatically-approve-routes-of-a-subnet-router) and [exit
|
||||||
nodes](../ref/routes.md#automatically-approve-an-exit-node-with-auto-approvers)
|
nodes](../ref/routes.md#automatically-approve-an-exit-node-with-auto-approvers)
|
||||||
|
|
5
docs/packaging/README.md
Normal file
5
docs/packaging/README.md
Normal file
|
@ -0,0 +1,5 @@
|
||||||
|
# Packaging
|
||||||
|
|
||||||
|
We use [nFPM](https://nfpm.goreleaser.com/) for making `.deb`, `.rpm` and `.apk`.
|
||||||
|
|
||||||
|
This folder contains files we need to package with these releases.
|
|
@ -1,4 +1,5 @@
|
||||||
[Unit]
|
[Unit]
|
||||||
|
After=syslog.target
|
||||||
After=network.target
|
After=network.target
|
||||||
Description=headscale coordination server for Tailscale
|
Description=headscale coordination server for Tailscale
|
||||||
X-Restart-Triggers=/etc/headscale/config.yaml
|
X-Restart-Triggers=/etc/headscale/config.yaml
|
||||||
|
@ -13,7 +14,7 @@ Restart=always
|
||||||
RestartSec=5
|
RestartSec=5
|
||||||
|
|
||||||
WorkingDirectory=/var/lib/headscale
|
WorkingDirectory=/var/lib/headscale
|
||||||
ReadWritePaths=/var/lib/headscale
|
ReadWritePaths=/var/lib/headscale /var/run
|
||||||
|
|
||||||
AmbientCapabilities=CAP_NET_BIND_SERVICE CAP_CHOWN
|
AmbientCapabilities=CAP_NET_BIND_SERVICE CAP_CHOWN
|
||||||
CapabilityBoundingSet=CAP_NET_BIND_SERVICE CAP_CHOWN
|
CapabilityBoundingSet=CAP_NET_BIND_SERVICE CAP_CHOWN
|
88
docs/packaging/postinstall.sh
Normal file
88
docs/packaging/postinstall.sh
Normal file
|
@ -0,0 +1,88 @@
|
||||||
|
#!/bin/sh
|
||||||
|
# Determine OS platform
|
||||||
|
# shellcheck source=/dev/null
|
||||||
|
. /etc/os-release
|
||||||
|
|
||||||
|
HEADSCALE_EXE="/usr/bin/headscale"
|
||||||
|
BSD_HIER=""
|
||||||
|
HEADSCALE_RUN_DIR="/var/run/headscale"
|
||||||
|
HEADSCALE_HOME_DIR="/var/lib/headscale"
|
||||||
|
HEADSCALE_USER="headscale"
|
||||||
|
HEADSCALE_GROUP="headscale"
|
||||||
|
HEADSCALE_SHELL="/usr/sbin/nologin"
|
||||||
|
|
||||||
|
ensure_sudo() {
|
||||||
|
if [ "$(id -u)" = "0" ]; then
|
||||||
|
echo "Sudo permissions detected"
|
||||||
|
else
|
||||||
|
echo "No sudo permission detected, please run as sudo"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
ensure_headscale_path() {
|
||||||
|
if [ ! -f "$HEADSCALE_EXE" ]; then
|
||||||
|
echo "headscale not in default path, exiting..."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
printf "Found headscale %s\n" "$HEADSCALE_EXE"
|
||||||
|
}
|
||||||
|
|
||||||
|
create_headscale_user() {
|
||||||
|
printf "PostInstall: Adding headscale user %s\n" "$HEADSCALE_USER"
|
||||||
|
useradd -r -s "$HEADSCALE_SHELL" -d "$HEADSCALE_HOME_DIR" -c "headscale default user" "$HEADSCALE_USER"
|
||||||
|
}
|
||||||
|
|
||||||
|
create_headscale_group() {
|
||||||
|
if command -V systemctl >/dev/null 2>&1; then
|
||||||
|
printf "PostInstall: Adding headscale group %s\n" "$HEADSCALE_GROUP"
|
||||||
|
groupadd -r "$HEADSCALE_GROUP"
|
||||||
|
|
||||||
|
printf "PostInstall: Adding headscale user %s to group %s\n" "$HEADSCALE_USER" "$HEADSCALE_GROUP"
|
||||||
|
usermod -a -G "$HEADSCALE_GROUP" "$HEADSCALE_USER"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$ID" = "alpine" ]; then
|
||||||
|
printf "PostInstall: Adding headscale group %s\n" "$HEADSCALE_GROUP"
|
||||||
|
addgroup -S "$HEADSCALE_GROUP"
|
||||||
|
|
||||||
|
printf "PostInstall: Adding headscale user %s to group %s\n" "$HEADSCALE_USER" "$HEADSCALE_GROUP"
|
||||||
|
addgroup "$HEADSCALE_USER" "$HEADSCALE_GROUP"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
create_run_dir() {
|
||||||
|
printf "PostInstall: Creating headscale run directory \n"
|
||||||
|
mkdir -p "$HEADSCALE_RUN_DIR"
|
||||||
|
|
||||||
|
printf "PostInstall: Modifying group ownership of headscale run directory \n"
|
||||||
|
chown "$HEADSCALE_USER":"$HEADSCALE_GROUP" "$HEADSCALE_RUN_DIR"
|
||||||
|
}
|
||||||
|
|
||||||
|
summary() {
|
||||||
|
echo "----------------------------------------------------------------------"
|
||||||
|
echo " headscale package has been successfully installed."
|
||||||
|
echo ""
|
||||||
|
echo " Please follow the next steps to start the software:"
|
||||||
|
echo ""
|
||||||
|
echo " sudo systemctl enable headscale"
|
||||||
|
echo " sudo systemctl start headscale"
|
||||||
|
echo ""
|
||||||
|
echo " Configuration settings can be adjusted here:"
|
||||||
|
echo " ${BSD_HIER}/etc/headscale/config.yaml"
|
||||||
|
echo ""
|
||||||
|
echo "----------------------------------------------------------------------"
|
||||||
|
}
|
||||||
|
|
||||||
|
#
|
||||||
|
# Main body of the script
|
||||||
|
#
|
||||||
|
{
|
||||||
|
ensure_sudo
|
||||||
|
ensure_headscale_path
|
||||||
|
create_headscale_user
|
||||||
|
create_headscale_group
|
||||||
|
create_run_dir
|
||||||
|
summary
|
||||||
|
}
|
15
docs/packaging/postremove.sh
Normal file
15
docs/packaging/postremove.sh
Normal file
|
@ -0,0 +1,15 @@
|
||||||
|
#!/bin/sh
|
||||||
|
# Determine OS platform
|
||||||
|
# shellcheck source=/dev/null
|
||||||
|
. /etc/os-release
|
||||||
|
|
||||||
|
if command -V systemctl >/dev/null 2>&1; then
|
||||||
|
echo "Stop and disable headscale service"
|
||||||
|
systemctl stop headscale >/dev/null 2>&1 || true
|
||||||
|
systemctl disable headscale >/dev/null 2>&1 || true
|
||||||
|
echo "Running daemon-reload"
|
||||||
|
systemctl daemon-reload || true
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "Removing run directory"
|
||||||
|
rm -rf "/var/run/headscale.sock"
|
|
@ -5,11 +5,10 @@
|
||||||
This page contains community contributions. The projects listed here are not
|
This page contains community contributions. The projects listed here are not
|
||||||
maintained by the headscale authors and are written by community members.
|
maintained by the headscale authors and are written by community members.
|
||||||
|
|
||||||
This page collects third-party tools, client libraries, and scripts related to headscale.
|
This page collects third-party tools and scripts related to headscale.
|
||||||
|
|
||||||
| Name | Repository Link | Description |
|
| Name | Repository Link | Description |
|
||||||
| --------------------- | --------------------------------------------------------------- | -------------------------------------------------------------------- |
|
| --------------------- | --------------------------------------------------------------- | -------------------------------------------------------------------- |
|
||||||
| tailscale-manager | [Github](https://github.com/singlestore-labs/tailscale-manager) | Dynamically manage Tailscale route advertisements |
|
| tailscale-manager | [Github](https://github.com/singlestore-labs/tailscale-manager) | Dynamically manage Tailscale route advertisements |
|
||||||
| headscalebacktosqlite | [Github](https://github.com/bigbozza/headscalebacktosqlite) | Migrate headscale from PostgreSQL back to SQLite |
|
| headscalebacktosqlite | [Github](https://github.com/bigbozza/headscalebacktosqlite) | Migrate headscale from PostgreSQL back to SQLite |
|
||||||
| headscale-pf | [Github](https://github.com/YouSysAdmin/headscale-pf) | Populates user groups based on user groups in Jumpcloud or Authentik |
|
| headscale-pf | [Github](https://github.com/YouSysAdmin/headscale-pf) | Populates user groups based on user groups in Jumpcloud or Authentik |
|
||||||
| headscale-client-go | [Github](https://github.com/hibare/headscale-client-go) | A Go client implementation for the Headscale HTTP API. |
|
|
||||||
|
|
|
@ -7,14 +7,13 @@
|
||||||
|
|
||||||
Headscale doesn't provide a built-in web interface but users may pick one from the available options.
|
Headscale doesn't provide a built-in web interface but users may pick one from the available options.
|
||||||
|
|
||||||
| Name | Repository Link | Description |
|
| Name | Repository Link | Description |
|
||||||
| ---------------------- | ----------------------------------------------------------- | -------------------------------------------------------------------------------------------- |
|
| ---------------------- | ---------------------------------------------------------- | ------------------------------------------------------------------------------------ |
|
||||||
| headscale-ui | [Github](https://github.com/gurucomputing/headscale-ui) | A web frontend for the headscale Tailscale-compatible coordination server |
|
| headscale-ui | [Github](https://github.com/gurucomputing/headscale-ui) | A web frontend for the headscale Tailscale-compatible coordination server |
|
||||||
| HeadscaleUi | [GitHub](https://github.com/simcu/headscale-ui) | A static headscale admin ui, no backend environment required |
|
| HeadscaleUi | [GitHub](https://github.com/simcu/headscale-ui) | A static headscale admin ui, no backend environment required |
|
||||||
| Headplane | [GitHub](https://github.com/tale/headplane) | An advanced Tailscale inspired frontend for headscale |
|
| Headplane | [GitHub](https://github.com/tale/headplane) | An advanced Tailscale inspired frontend for headscale |
|
||||||
| headscale-admin | [Github](https://github.com/GoodiesHQ/headscale-admin) | Headscale-Admin is meant to be a simple, modern web interface for headscale |
|
| headscale-admin | [Github](https://github.com/GoodiesHQ/headscale-admin) | Headscale-Admin is meant to be a simple, modern web interface for headscale |
|
||||||
| ouroboros | [Github](https://github.com/yellowsink/ouroboros) | Ouroboros is designed for users to manage their own devices, rather than for admins |
|
| ouroboros | [Github](https://github.com/yellowsink/ouroboros) | Ouroboros is designed for users to manage their own devices, rather than for admins |
|
||||||
| unraid-headscale-admin | [Github](https://github.com/ich777/unraid-headscale-admin) | A simple headscale admin UI for Unraid, it offers Local (`docker exec`) and API Mode |
|
| unraid-headscale-admin | [Github](https://github.com/ich777/unraid-headscale-admin) | A simple headscale admin UI for Unraid, it offers Local (`docker exec`) and API Mode |
|
||||||
| headscale-console | [Github](https://github.com/rickli-cloud/headscale-console) | WebAssembly-based client supporting SSH, VNC and RDP with optional self-service capabilities |
|
|
||||||
|
|
||||||
You can ask for support on our [Discord server](https://discord.gg/c84AZQhmpx) in the "web-interfaces" channel.
|
You can ask for support on our [Discord server](https://discord.gg/c84AZQhmpx) in the "web-interfaces" channel.
|
||||||
|
|
|
@ -7,7 +7,7 @@ Both are available on the [GitHub releases page](https://github.com/juanfont/hea
|
||||||
|
|
||||||
It is recommended to use our DEB packages to install headscale on a Debian based system as those packages configure a
|
It is recommended to use our DEB packages to install headscale on a Debian based system as those packages configure a
|
||||||
local user to run headscale, provide a default configuration and ship with a systemd service file. Supported
|
local user to run headscale, provide a default configuration and ship with a systemd service file. Supported
|
||||||
distributions are Ubuntu 22.04 or newer, Debian 11 or newer.
|
distributions are Ubuntu 20.04 or newer, Debian 11 or newer.
|
||||||
|
|
||||||
1. Download the [latest headscale package](https://github.com/juanfont/headscale/releases/latest) for your platform (`.deb` for Ubuntu and Debian).
|
1. Download the [latest headscale package](https://github.com/juanfont/headscale/releases/latest) for your platform (`.deb` for Ubuntu and Debian).
|
||||||
|
|
||||||
|
@ -87,8 +87,8 @@ managed by systemd.
|
||||||
sudo nano /etc/headscale/config.yaml
|
sudo nano /etc/headscale/config.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
1. Copy [headscale's systemd service file](https://github.com/juanfont/headscale/blob/main/packaging/systemd/headscale.service)
|
1. Copy [headscale's systemd service file](../../packaging/headscale.systemd.service) to
|
||||||
to `/etc/systemd/system/headscale.service` and adjust it to suit your local setup. The following parameters likely need
|
`/etc/systemd/system/headscale.service` and adjust it to suit your local setup. The following parameters likely need
|
||||||
to be modified: `ExecStart`, `WorkingDirectory`, `ReadWritePaths`.
|
to be modified: `ExecStart`, `WorkingDirectory`, `ReadWritePaths`.
|
||||||
|
|
||||||
1. In `/etc/headscale/config.yaml`, override the default `headscale` unix socket with a path that is writable by the
|
1. In `/etc/headscale/config.yaml`, override the default `headscale` unix socket with a path that is writable by the
|
||||||
|
|
|
@ -435,7 +435,8 @@ func TestAutoApproveRoutes(t *testing.T) {
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
pmfs := policy.PolicyManagerFuncsForTest([]byte(tt.acl))
|
pmfs := policy.PolicyManagerFuncsForTest([]byte(tt.acl))
|
||||||
for i, pmf := range pmfs {
|
for i, pmf := range pmfs {
|
||||||
t.Run(fmt.Sprintf("%s-policy-index%d", tt.name, i), func(t *testing.T) {
|
version := i + 1
|
||||||
|
t.Run(fmt.Sprintf("%s-policyv%d", tt.name, version), func(t *testing.T) {
|
||||||
adb, err := newSQLiteTestDB()
|
adb, err := newSQLiteTestDB()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
|
|
@ -263,7 +263,7 @@ func Test_fullMapResponse(t *testing.T) {
|
||||||
// {
|
// {
|
||||||
// name: "empty-node",
|
// name: "empty-node",
|
||||||
// node: types.Node{},
|
// node: types.Node{},
|
||||||
// pol: &policyv2.Policy{},
|
// pol: &policyv1.ACLPolicy{},
|
||||||
// dnsConfig: &tailcfg.DNSConfig{},
|
// dnsConfig: &tailcfg.DNSConfig{},
|
||||||
// baseDomain: "",
|
// baseDomain: "",
|
||||||
// want: nil,
|
// want: nil,
|
||||||
|
|
|
@ -5,11 +5,17 @@ import (
|
||||||
|
|
||||||
"github.com/juanfont/headscale/hscontrol/policy/matcher"
|
"github.com/juanfont/headscale/hscontrol/policy/matcher"
|
||||||
|
|
||||||
|
policyv1 "github.com/juanfont/headscale/hscontrol/policy/v1"
|
||||||
policyv2 "github.com/juanfont/headscale/hscontrol/policy/v2"
|
policyv2 "github.com/juanfont/headscale/hscontrol/policy/v2"
|
||||||
"github.com/juanfont/headscale/hscontrol/types"
|
"github.com/juanfont/headscale/hscontrol/types"
|
||||||
|
"tailscale.com/envknob"
|
||||||
"tailscale.com/tailcfg"
|
"tailscale.com/tailcfg"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
polv1 = envknob.Bool("HEADSCALE_POLICY_V1")
|
||||||
|
)
|
||||||
|
|
||||||
type PolicyManager interface {
|
type PolicyManager interface {
|
||||||
// Filter returns the current filter rules for the entire tailnet and the associated matchers.
|
// Filter returns the current filter rules for the entire tailnet and the associated matchers.
|
||||||
Filter() ([]tailcfg.FilterRule, []matcher.Match)
|
Filter() ([]tailcfg.FilterRule, []matcher.Match)
|
||||||
|
@ -27,13 +33,21 @@ type PolicyManager interface {
|
||||||
DebugString() string
|
DebugString() string
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewPolicyManager returns a new policy manager.
|
// NewPolicyManager returns a new policy manager, the version is determined by
|
||||||
|
// the environment flag "HEADSCALE_POLICY_V1".
|
||||||
func NewPolicyManager(pol []byte, users []types.User, nodes types.Nodes) (PolicyManager, error) {
|
func NewPolicyManager(pol []byte, users []types.User, nodes types.Nodes) (PolicyManager, error) {
|
||||||
var polMan PolicyManager
|
var polMan PolicyManager
|
||||||
var err error
|
var err error
|
||||||
polMan, err = policyv2.NewPolicyManager(pol, users, nodes)
|
if polv1 {
|
||||||
if err != nil {
|
polMan, err = policyv1.NewPolicyManager(pol, users, nodes)
|
||||||
return nil, err
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
polMan, err = policyv2.NewPolicyManager(pol, users, nodes)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return polMan, err
|
return polMan, err
|
||||||
|
@ -59,6 +73,9 @@ func PolicyManagersForTest(pol []byte, users []types.User, nodes types.Nodes) ([
|
||||||
func PolicyManagerFuncsForTest(pol []byte) []func([]types.User, types.Nodes) (PolicyManager, error) {
|
func PolicyManagerFuncsForTest(pol []byte) []func([]types.User, types.Nodes) (PolicyManager, error) {
|
||||||
var polmanFuncs []func([]types.User, types.Nodes) (PolicyManager, error)
|
var polmanFuncs []func([]types.User, types.Nodes) (PolicyManager, error)
|
||||||
|
|
||||||
|
polmanFuncs = append(polmanFuncs, func(u []types.User, n types.Nodes) (PolicyManager, error) {
|
||||||
|
return policyv1.NewPolicyManager(pol, u, n)
|
||||||
|
})
|
||||||
polmanFuncs = append(polmanFuncs, func(u []types.User, n types.Nodes) (PolicyManager, error) {
|
polmanFuncs = append(polmanFuncs, func(u []types.User, n types.Nodes) (PolicyManager, error) {
|
||||||
return policyv2.NewPolicyManager(pol, u, n)
|
return policyv2.NewPolicyManager(pol, u, n)
|
||||||
})
|
})
|
||||||
|
|
|
@ -490,6 +490,18 @@ func TestReduceFilterRules(t *testing.T) {
|
||||||
{IP: "16.0.0.0/4", Ports: tailcfg.PortRangeAny},
|
{IP: "16.0.0.0/4", Ports: tailcfg.PortRangeAny},
|
||||||
{IP: "32.0.0.0/3", Ports: tailcfg.PortRangeAny},
|
{IP: "32.0.0.0/3", Ports: tailcfg.PortRangeAny},
|
||||||
{IP: "64.0.0.0/2", Ports: tailcfg.PortRangeAny},
|
{IP: "64.0.0.0/2", Ports: tailcfg.PortRangeAny},
|
||||||
|
// This should not be included I believe, seems like
|
||||||
|
// this is a bug in the v1 code.
|
||||||
|
// For example:
|
||||||
|
// If a src or dst includes "64.0.0.0/2:*", it will include 100.64/16 range, which
|
||||||
|
// means that it will need to fetch the IPv6 addrs of the node to include the full range.
|
||||||
|
// Clearly, if a user sets the dst to be "64.0.0.0/2:*", it is likely more of a exit node
|
||||||
|
// and this would be strange behaviour.
|
||||||
|
// TODO(kradalby): Remove before launch.
|
||||||
|
{IP: "fd7a:115c:a1e0::1/128", Ports: tailcfg.PortRangeAny},
|
||||||
|
{IP: "fd7a:115c:a1e0::2/128", Ports: tailcfg.PortRangeAny},
|
||||||
|
{IP: "fd7a:115c:a1e0::100/128", Ports: tailcfg.PortRangeAny},
|
||||||
|
// End
|
||||||
{IP: "128.0.0.0/3", Ports: tailcfg.PortRangeAny},
|
{IP: "128.0.0.0/3", Ports: tailcfg.PortRangeAny},
|
||||||
{IP: "160.0.0.0/5", Ports: tailcfg.PortRangeAny},
|
{IP: "160.0.0.0/5", Ports: tailcfg.PortRangeAny},
|
||||||
{IP: "168.0.0.0/6", Ports: tailcfg.PortRangeAny},
|
{IP: "168.0.0.0/6", Ports: tailcfg.PortRangeAny},
|
||||||
|
@ -812,7 +824,8 @@ func TestReduceFilterRules(t *testing.T) {
|
||||||
|
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
for idx, pmf := range PolicyManagerFuncsForTest([]byte(tt.pol)) {
|
for idx, pmf := range PolicyManagerFuncsForTest([]byte(tt.pol)) {
|
||||||
t.Run(fmt.Sprintf("%s-index%d", tt.name, idx), func(t *testing.T) {
|
version := idx + 1
|
||||||
|
t.Run(fmt.Sprintf("%s-v%d", tt.name, version), func(t *testing.T) {
|
||||||
var pm PolicyManager
|
var pm PolicyManager
|
||||||
var err error
|
var err error
|
||||||
pm, err = pmf(users, append(tt.peers, tt.node))
|
pm, err = pmf(users, append(tt.peers, tt.node))
|
||||||
|
@ -1631,6 +1644,10 @@ func TestSSHPolicyRules(t *testing.T) {
|
||||||
wantSSH *tailcfg.SSHPolicy
|
wantSSH *tailcfg.SSHPolicy
|
||||||
expectErr bool
|
expectErr bool
|
||||||
errorMessage string
|
errorMessage string
|
||||||
|
|
||||||
|
// There are some tests that will not pass on V1 since we do not
|
||||||
|
// have the same kind of error handling as V2, so we skip them.
|
||||||
|
skipV1 bool
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
name: "group-to-user",
|
name: "group-to-user",
|
||||||
|
@ -1664,6 +1681,10 @@ func TestSSHPolicyRules(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}},
|
}},
|
||||||
|
|
||||||
|
// It looks like the group implementation in v1 is broken, so
|
||||||
|
// we skip this test for v1 and not let it hold up v2 replacing it.
|
||||||
|
skipV1: true,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "group-to-tag",
|
name: "group-to-tag",
|
||||||
|
@ -1701,6 +1722,10 @@ func TestSSHPolicyRules(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}},
|
}},
|
||||||
|
|
||||||
|
// It looks like the group implementation in v1 is broken, so
|
||||||
|
// we skip this test for v1 and not let it hold up v2 replacing it.
|
||||||
|
skipV1: true,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "tag-to-user",
|
name: "tag-to-user",
|
||||||
|
@ -1801,6 +1826,10 @@ func TestSSHPolicyRules(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}},
|
}},
|
||||||
|
|
||||||
|
// It looks like the group implementation in v1 is broken, so
|
||||||
|
// we skip this test for v1 and not let it hold up v2 replacing it.
|
||||||
|
skipV1: true,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "check-period-specified",
|
name: "check-period-specified",
|
||||||
|
@ -1872,6 +1901,7 @@ func TestSSHPolicyRules(t *testing.T) {
|
||||||
}`,
|
}`,
|
||||||
expectErr: true,
|
expectErr: true,
|
||||||
errorMessage: `SSH action "invalid" is not valid, must be accept or check`,
|
errorMessage: `SSH action "invalid" is not valid, must be accept or check`,
|
||||||
|
skipV1: true,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "invalid-check-period",
|
name: "invalid-check-period",
|
||||||
|
@ -1890,6 +1920,7 @@ func TestSSHPolicyRules(t *testing.T) {
|
||||||
}`,
|
}`,
|
||||||
expectErr: true,
|
expectErr: true,
|
||||||
errorMessage: "not a valid duration string",
|
errorMessage: "not a valid duration string",
|
||||||
|
skipV1: true,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "multiple-ssh-users-with-autogroup",
|
name: "multiple-ssh-users-with-autogroup",
|
||||||
|
@ -1941,12 +1972,18 @@ func TestSSHPolicyRules(t *testing.T) {
|
||||||
}`,
|
}`,
|
||||||
expectErr: true,
|
expectErr: true,
|
||||||
errorMessage: "autogroup \"autogroup:invalid\" is not supported",
|
errorMessage: "autogroup \"autogroup:invalid\" is not supported",
|
||||||
|
skipV1: true,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
for idx, pmf := range PolicyManagerFuncsForTest([]byte(tt.policy)) {
|
for idx, pmf := range PolicyManagerFuncsForTest([]byte(tt.policy)) {
|
||||||
t.Run(fmt.Sprintf("%s-index%d", tt.name, idx), func(t *testing.T) {
|
version := idx + 1
|
||||||
|
t.Run(fmt.Sprintf("%s-v%d", tt.name, version), func(t *testing.T) {
|
||||||
|
if version == 1 && tt.skipV1 {
|
||||||
|
t.Skip()
|
||||||
|
}
|
||||||
|
|
||||||
var pm PolicyManager
|
var pm PolicyManager
|
||||||
var err error
|
var err error
|
||||||
pm, err = pmf(users, append(tt.peers, &tt.targetNode))
|
pm, err = pmf(users, append(tt.peers, &tt.targetNode))
|
||||||
|
|
|
@ -60,6 +60,7 @@ func TestNodeCanApproveRoute(t *testing.T) {
|
||||||
route netip.Prefix
|
route netip.Prefix
|
||||||
policy string
|
policy string
|
||||||
canApprove bool
|
canApprove bool
|
||||||
|
skipV1 bool
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
name: "allow-all-routes-for-admin-user",
|
name: "allow-all-routes-for-admin-user",
|
||||||
|
@ -765,10 +766,10 @@ func TestNodeCanApproveRoute(t *testing.T) {
|
||||||
canApprove: false,
|
canApprove: false,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "empty-policy",
|
name: "empty-policy",
|
||||||
node: normalNode,
|
node: normalNode,
|
||||||
route: p("192.168.1.0/24"),
|
route: p("192.168.1.0/24"),
|
||||||
policy: `{"acls":[{"action":"accept","src":["*"],"dst":["*:*"]}]}`,
|
policy: `{"acls":[{"action":"accept","src":["*"],"dst":["*:*"]}]}`,
|
||||||
canApprove: false,
|
canApprove: false,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
@ -788,7 +789,13 @@ func TestNodeCanApproveRoute(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
for i, pm := range policyManagers {
|
for i, pm := range policyManagers {
|
||||||
t.Run(fmt.Sprintf("policy-index%d", i), func(t *testing.T) {
|
versionNum := i + 1
|
||||||
|
if versionNum == 1 && tt.skipV1 {
|
||||||
|
// Skip V1 policy manager for specific tests
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run(fmt.Sprintf("PolicyV%d", versionNum), func(t *testing.T) {
|
||||||
result := pm.NodeCanApproveRoute(&tt.node, tt.route)
|
result := pm.NodeCanApproveRoute(&tt.node, tt.route)
|
||||||
|
|
||||||
if diff := cmp.Diff(tt.canApprove, result); diff != "" {
|
if diff := cmp.Diff(tt.canApprove, result); diff != "" {
|
||||||
|
|
996
hscontrol/policy/v1/acls.go
Normal file
996
hscontrol/policy/v1/acls.go
Normal file
|
@ -0,0 +1,996 @@
|
||||||
|
package v1
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/netip"
|
||||||
|
"os"
|
||||||
|
"slices"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/juanfont/headscale/hscontrol/types"
|
||||||
|
"github.com/juanfont/headscale/hscontrol/util"
|
||||||
|
"github.com/rs/zerolog/log"
|
||||||
|
"github.com/tailscale/hujson"
|
||||||
|
"go4.org/netipx"
|
||||||
|
"tailscale.com/tailcfg"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrEmptyPolicy = errors.New("empty policy")
|
||||||
|
ErrInvalidAction = errors.New("invalid action")
|
||||||
|
ErrInvalidGroup = errors.New("invalid group")
|
||||||
|
ErrInvalidTag = errors.New("invalid tag")
|
||||||
|
ErrInvalidPortFormat = errors.New("invalid port format")
|
||||||
|
ErrWildcardIsNeeded = errors.New("wildcard as port is required for the protocol")
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
portRangeBegin = 0
|
||||||
|
portRangeEnd = 65535
|
||||||
|
expectedTokenItems = 2
|
||||||
|
)
|
||||||
|
|
||||||
|
// For some reason golang.org/x/net/internal/iana is an internal package.
|
||||||
|
const (
|
||||||
|
protocolICMP = 1 // Internet Control Message
|
||||||
|
protocolIGMP = 2 // Internet Group Management
|
||||||
|
protocolIPv4 = 4 // IPv4 encapsulation
|
||||||
|
protocolTCP = 6 // Transmission Control
|
||||||
|
protocolEGP = 8 // Exterior Gateway Protocol
|
||||||
|
protocolIGP = 9 // any private interior gateway (used by Cisco for their IGRP)
|
||||||
|
protocolUDP = 17 // User Datagram
|
||||||
|
protocolGRE = 47 // Generic Routing Encapsulation
|
||||||
|
protocolESP = 50 // Encap Security Payload
|
||||||
|
protocolAH = 51 // Authentication Header
|
||||||
|
protocolIPv6ICMP = 58 // ICMP for IPv6
|
||||||
|
protocolSCTP = 132 // Stream Control Transmission Protocol
|
||||||
|
ProtocolFC = 133 // Fibre Channel
|
||||||
|
)
|
||||||
|
|
||||||
|
// LoadACLPolicyFromPath loads the ACL policy from the specify path, and generates the ACL rules.
|
||||||
|
func LoadACLPolicyFromPath(path string) (*ACLPolicy, error) {
|
||||||
|
log.Debug().
|
||||||
|
Str("func", "LoadACLPolicy").
|
||||||
|
Str("path", path).
|
||||||
|
Msg("Loading ACL policy from path")
|
||||||
|
|
||||||
|
policyFile, err := os.Open(path)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer policyFile.Close()
|
||||||
|
|
||||||
|
policyBytes, err := io.ReadAll(policyFile)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debug().
|
||||||
|
Str("path", path).
|
||||||
|
Bytes("file", policyBytes).
|
||||||
|
Msg("Loading ACLs")
|
||||||
|
|
||||||
|
return LoadACLPolicyFromBytes(policyBytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
func LoadACLPolicyFromBytes(acl []byte) (*ACLPolicy, error) {
|
||||||
|
var policy ACLPolicy
|
||||||
|
|
||||||
|
ast, err := hujson.Parse(acl)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("parsing hujson, err: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ast.Standardize()
|
||||||
|
acl = ast.Pack()
|
||||||
|
|
||||||
|
if err := json.Unmarshal(acl, &policy); err != nil {
|
||||||
|
return nil, fmt.Errorf("unmarshalling policy, err: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if policy.IsZero() {
|
||||||
|
return nil, ErrEmptyPolicy
|
||||||
|
}
|
||||||
|
|
||||||
|
return &policy, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func GenerateFilterAndSSHRulesForTests(
|
||||||
|
policy *ACLPolicy,
|
||||||
|
node *types.Node,
|
||||||
|
peers types.Nodes,
|
||||||
|
users []types.User,
|
||||||
|
) ([]tailcfg.FilterRule, *tailcfg.SSHPolicy, error) {
|
||||||
|
// If there is no policy defined, we default to allow all
|
||||||
|
if policy == nil {
|
||||||
|
return tailcfg.FilterAllowAll, &tailcfg.SSHPolicy{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
rules, err := policy.CompileFilterRules(users, append(peers, node))
|
||||||
|
if err != nil {
|
||||||
|
return []tailcfg.FilterRule{}, &tailcfg.SSHPolicy{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Trace().Interface("ACL", rules).Str("node", node.GivenName).Msg("ACL rules")
|
||||||
|
|
||||||
|
sshPolicy, err := policy.CompileSSHPolicy(node, users, peers)
|
||||||
|
if err != nil {
|
||||||
|
return []tailcfg.FilterRule{}, &tailcfg.SSHPolicy{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return rules, sshPolicy, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CompileFilterRules takes a set of nodes and an ACLPolicy and generates a
|
||||||
|
// set of Tailscale compatible FilterRules used to allow traffic on clients.
|
||||||
|
func (pol *ACLPolicy) CompileFilterRules(
|
||||||
|
users []types.User,
|
||||||
|
nodes types.Nodes,
|
||||||
|
) ([]tailcfg.FilterRule, error) {
|
||||||
|
if pol == nil {
|
||||||
|
return tailcfg.FilterAllowAll, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var rules []tailcfg.FilterRule
|
||||||
|
|
||||||
|
for index, acl := range pol.ACLs {
|
||||||
|
if acl.Action != "accept" {
|
||||||
|
return nil, ErrInvalidAction
|
||||||
|
}
|
||||||
|
|
||||||
|
var srcIPs []string
|
||||||
|
for srcIndex, src := range acl.Sources {
|
||||||
|
srcs, err := pol.expandSource(src, users, nodes)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf(
|
||||||
|
"parsing policy, acl index: %d->%d: %w",
|
||||||
|
index,
|
||||||
|
srcIndex,
|
||||||
|
err,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
srcIPs = append(srcIPs, srcs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
protocols, isWildcard, err := parseProtocol(acl.Protocol)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("parsing policy, protocol err: %w ", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
destPorts := []tailcfg.NetPortRange{}
|
||||||
|
for _, dest := range acl.Destinations {
|
||||||
|
alias, port, err := parseDestination(dest)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
expanded, err := pol.ExpandAlias(
|
||||||
|
nodes,
|
||||||
|
users,
|
||||||
|
alias,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
ports, err := expandPorts(port, isWildcard)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var dests []tailcfg.NetPortRange
|
||||||
|
for _, dest := range expanded.Prefixes() {
|
||||||
|
for _, port := range *ports {
|
||||||
|
pr := tailcfg.NetPortRange{
|
||||||
|
IP: dest.String(),
|
||||||
|
Ports: port,
|
||||||
|
}
|
||||||
|
dests = append(dests, pr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
destPorts = append(destPorts, dests...)
|
||||||
|
}
|
||||||
|
|
||||||
|
rules = append(rules, tailcfg.FilterRule{
|
||||||
|
SrcIPs: srcIPs,
|
||||||
|
DstPorts: destPorts,
|
||||||
|
IPProto: protocols,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return rules, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pol *ACLPolicy) CompileSSHPolicy(
|
||||||
|
node *types.Node,
|
||||||
|
users []types.User,
|
||||||
|
peers types.Nodes,
|
||||||
|
) (*tailcfg.SSHPolicy, error) {
|
||||||
|
if pol == nil {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var rules []*tailcfg.SSHRule
|
||||||
|
|
||||||
|
acceptAction := tailcfg.SSHAction{
|
||||||
|
Message: "",
|
||||||
|
Reject: false,
|
||||||
|
Accept: true,
|
||||||
|
SessionDuration: 0,
|
||||||
|
AllowAgentForwarding: true,
|
||||||
|
HoldAndDelegate: "",
|
||||||
|
AllowLocalPortForwarding: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
rejectAction := tailcfg.SSHAction{
|
||||||
|
Message: "",
|
||||||
|
Reject: true,
|
||||||
|
Accept: false,
|
||||||
|
SessionDuration: 0,
|
||||||
|
AllowAgentForwarding: false,
|
||||||
|
HoldAndDelegate: "",
|
||||||
|
AllowLocalPortForwarding: false,
|
||||||
|
}
|
||||||
|
|
||||||
|
for index, sshACL := range pol.SSHs {
|
||||||
|
var dest netipx.IPSetBuilder
|
||||||
|
for _, src := range sshACL.Destinations {
|
||||||
|
expanded, err := pol.ExpandAlias(append(peers, node), users, src)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
dest.AddSet(expanded)
|
||||||
|
}
|
||||||
|
|
||||||
|
destSet, err := dest.IPSet()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !node.InIPSet(destSet) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
action := rejectAction
|
||||||
|
switch sshACL.Action {
|
||||||
|
case "accept":
|
||||||
|
action = acceptAction
|
||||||
|
case "check":
|
||||||
|
checkAction, err := sshCheckAction(sshACL.CheckPeriod)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf(
|
||||||
|
"parsing SSH policy, parsing check duration, index: %d: %w",
|
||||||
|
index,
|
||||||
|
err,
|
||||||
|
)
|
||||||
|
} else {
|
||||||
|
action = *checkAction
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf(
|
||||||
|
"parsing SSH policy, unknown action %q, index: %d: %w",
|
||||||
|
sshACL.Action,
|
||||||
|
index,
|
||||||
|
err,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
var principals []*tailcfg.SSHPrincipal
|
||||||
|
for innerIndex, srcToken := range sshACL.Sources {
|
||||||
|
if isWildcard(srcToken) {
|
||||||
|
principals = []*tailcfg.SSHPrincipal{{
|
||||||
|
Any: true,
|
||||||
|
}}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the token is a group, expand the users and validate
|
||||||
|
// them. Then use the .Username() to get the login name
|
||||||
|
// that corresponds with the User info in the netmap.
|
||||||
|
if isGroup(srcToken) {
|
||||||
|
usersFromGroup, err := pol.expandUsersFromGroup(srcToken)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("parsing SSH policy, expanding user from group, index: %d->%d: %w", index, innerIndex, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, userStr := range usersFromGroup {
|
||||||
|
user, err := findUserFromToken(users, userStr)
|
||||||
|
if err != nil {
|
||||||
|
log.Trace().Err(err).Msg("user not found")
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
principals = append(principals, &tailcfg.SSHPrincipal{
|
||||||
|
UserLogin: user.Username(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Try to check if the token is a user, if it is, then we
|
||||||
|
// can use the .Username() to get the login name that
|
||||||
|
// corresponds with the User info in the netmap.
|
||||||
|
// TODO(kradalby): This is a bit of a hack, and it should go
|
||||||
|
// away with the new policy where users can be reliably determined.
|
||||||
|
if user, err := findUserFromToken(users, srcToken); err == nil {
|
||||||
|
principals = append(principals, &tailcfg.SSHPrincipal{
|
||||||
|
UserLogin: user.Username(),
|
||||||
|
})
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// This is kind of then non-ideal scenario where we dont really know
|
||||||
|
// what to do with the token, so we expand it to IP addresses of nodes.
|
||||||
|
// The pro here is that we have a pretty good lockdown on the mapping
|
||||||
|
// between users and node, but it can explode if a user owns many nodes.
|
||||||
|
ips, err := pol.ExpandAlias(
|
||||||
|
peers,
|
||||||
|
users,
|
||||||
|
srcToken,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("parsing SSH policy, expanding alias, index: %d->%d: %w", index, innerIndex, err)
|
||||||
|
}
|
||||||
|
for addr := range util.IPSetAddrIter(ips) {
|
||||||
|
principals = append(principals, &tailcfg.SSHPrincipal{
|
||||||
|
NodeIP: addr.String(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
userMap := make(map[string]string, len(sshACL.Users))
|
||||||
|
for _, user := range sshACL.Users {
|
||||||
|
userMap[user] = "="
|
||||||
|
}
|
||||||
|
rules = append(rules, &tailcfg.SSHRule{
|
||||||
|
Principals: principals,
|
||||||
|
SSHUsers: userMap,
|
||||||
|
Action: &action,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return &tailcfg.SSHPolicy{
|
||||||
|
Rules: rules,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func sshCheckAction(duration string) (*tailcfg.SSHAction, error) {
|
||||||
|
sessionLength, err := time.ParseDuration(duration)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &tailcfg.SSHAction{
|
||||||
|
Message: "",
|
||||||
|
Reject: false,
|
||||||
|
Accept: true,
|
||||||
|
SessionDuration: sessionLength,
|
||||||
|
AllowAgentForwarding: true,
|
||||||
|
HoldAndDelegate: "",
|
||||||
|
AllowLocalPortForwarding: true,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseDestination(dest string) (string, string, error) {
|
||||||
|
var tokens []string
|
||||||
|
|
||||||
|
// Check if there is a IPv4/6:Port combination, IPv6 has more than
|
||||||
|
// three ":".
|
||||||
|
tokens = strings.Split(dest, ":")
|
||||||
|
if len(tokens) < expectedTokenItems || len(tokens) > 3 {
|
||||||
|
port := tokens[len(tokens)-1]
|
||||||
|
|
||||||
|
maybeIPv6Str := strings.TrimSuffix(dest, ":"+port)
|
||||||
|
log.Trace().Str("maybeIPv6Str", maybeIPv6Str).Msg("")
|
||||||
|
|
||||||
|
filteredMaybeIPv6Str := maybeIPv6Str
|
||||||
|
if strings.Contains(maybeIPv6Str, "/") {
|
||||||
|
networkParts := strings.Split(maybeIPv6Str, "/")
|
||||||
|
filteredMaybeIPv6Str = networkParts[0]
|
||||||
|
}
|
||||||
|
|
||||||
|
if maybeIPv6, err := netip.ParseAddr(filteredMaybeIPv6Str); err != nil && !maybeIPv6.Is6() {
|
||||||
|
log.Trace().Err(err).Msg("trying to parse as IPv6")
|
||||||
|
|
||||||
|
return "", "", fmt.Errorf(
|
||||||
|
"failed to parse destination, tokens %v: %w",
|
||||||
|
tokens,
|
||||||
|
ErrInvalidPortFormat,
|
||||||
|
)
|
||||||
|
} else {
|
||||||
|
tokens = []string{maybeIPv6Str, port}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var alias string
|
||||||
|
// We can have here stuff like:
|
||||||
|
// git-server:*
|
||||||
|
// 192.168.1.0/24:22
|
||||||
|
// fd7a:115c:a1e0::2:22
|
||||||
|
// fd7a:115c:a1e0::2/128:22
|
||||||
|
// tag:montreal-webserver:80,443
|
||||||
|
// tag:api-server:443
|
||||||
|
// example-host-1:*
|
||||||
|
if len(tokens) == expectedTokenItems {
|
||||||
|
alias = tokens[0]
|
||||||
|
} else {
|
||||||
|
alias = fmt.Sprintf("%s:%s", tokens[0], tokens[1])
|
||||||
|
}
|
||||||
|
|
||||||
|
return alias, tokens[len(tokens)-1], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseProtocol reads the proto field of the ACL and generates a list of
|
||||||
|
// protocols that will be allowed, following the IANA IP protocol number
|
||||||
|
// https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
|
||||||
|
//
|
||||||
|
// If the ACL proto field is empty, it allows ICMPv4, ICMPv6, TCP, and UDP,
|
||||||
|
// as per Tailscale behaviour (see tailcfg.FilterRule).
|
||||||
|
//
|
||||||
|
// Also returns a boolean indicating if the protocol
|
||||||
|
// requires all the destinations to use wildcard as port number (only TCP,
|
||||||
|
// UDP and SCTP support specifying ports).
|
||||||
|
func parseProtocol(protocol string) ([]int, bool, error) {
|
||||||
|
switch protocol {
|
||||||
|
case "":
|
||||||
|
return nil, false, nil
|
||||||
|
case "igmp":
|
||||||
|
return []int{protocolIGMP}, true, nil
|
||||||
|
case "ipv4", "ip-in-ip":
|
||||||
|
return []int{protocolIPv4}, true, nil
|
||||||
|
case "tcp":
|
||||||
|
return []int{protocolTCP}, false, nil
|
||||||
|
case "egp":
|
||||||
|
return []int{protocolEGP}, true, nil
|
||||||
|
case "igp":
|
||||||
|
return []int{protocolIGP}, true, nil
|
||||||
|
case "udp":
|
||||||
|
return []int{protocolUDP}, false, nil
|
||||||
|
case "gre":
|
||||||
|
return []int{protocolGRE}, true, nil
|
||||||
|
case "esp":
|
||||||
|
return []int{protocolESP}, true, nil
|
||||||
|
case "ah":
|
||||||
|
return []int{protocolAH}, true, nil
|
||||||
|
case "sctp":
|
||||||
|
return []int{protocolSCTP}, false, nil
|
||||||
|
case "icmp":
|
||||||
|
return []int{protocolICMP, protocolIPv6ICMP}, true, nil
|
||||||
|
|
||||||
|
default:
|
||||||
|
protocolNumber, err := strconv.Atoi(protocol)
|
||||||
|
if err != nil {
|
||||||
|
return nil, false, fmt.Errorf("parsing protocol number: %w", err)
|
||||||
|
}
|
||||||
|
needsWildcard := protocolNumber != protocolTCP &&
|
||||||
|
protocolNumber != protocolUDP &&
|
||||||
|
protocolNumber != protocolSCTP
|
||||||
|
|
||||||
|
return []int{protocolNumber}, needsWildcard, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// expandSource returns a set of Source IPs that would be associated
// with the given src alias.
func (pol *ACLPolicy) expandSource(
	src string,
	users []types.User,
	nodes types.Nodes,
) ([]string, error) {
	ipSet, err := pol.ExpandAlias(nodes, users, src)
	if err != nil {
		return []string{}, err
	}

	var prefixes []string
	for _, prefix := range ipSet.Prefixes() {
		prefixes = append(prefixes, prefix.String())
	}

	return prefixes, nil
}

// ExpandAlias has an input of either
// - a user
// - a group
// - a tag
// - a host
// - an ip
// - a cidr
// - an autogroup
// and transforms these into IP addresses.
func (pol *ACLPolicy) ExpandAlias(
	nodes types.Nodes,
	users []types.User,
	alias string,
) (*netipx.IPSet, error) {
	if isWildcard(alias) {
		return util.ParseIPSet("*", nil)
	}

	build := netipx.IPSetBuilder{}

	log.Debug().
		Str("alias", alias).
		Msg("Expanding")

	// if alias is a group
	if isGroup(alias) {
		return pol.expandIPsFromGroup(alias, users, nodes)
	}

	// if alias is a tag
	if isTag(alias) {
		return pol.expandIPsFromTag(alias, users, nodes)
	}

	if isAutoGroup(alias) {
		return expandAutoGroup(alias)
	}

	// if alias is a user
	if ips, err := pol.expandIPsFromUser(alias, users, nodes); ips != nil {
		return ips, err
	}

	// if alias is a host
	// Note, this is recursive.
	if h, ok := pol.Hosts[alias]; ok {
		log.Trace().Str("host", h.String()).Msg("ExpandAlias got hosts entry")

		return pol.ExpandAlias(nodes, users, h.String())
	}

	// if alias is an IP
	if ip, err := netip.ParseAddr(alias); err == nil {
		return pol.expandIPsFromSingleIP(ip, nodes)
	}

	// if alias is an IP Prefix (CIDR)
	if prefix, err := netip.ParsePrefix(alias); err == nil {
		return pol.expandIPsFromIPPrefix(prefix, nodes)
	}

	log.Warn().Msgf("No IPs found with the alias %v", alias)

	return build.IPSet()
}
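The order of the checks in ExpandAlias is significant: syntactic forms (wildcard, group, tag, autogroup) are handled first, then user expansion is attempted, and only afterwards the hosts table, single IPs and CIDRs are tried. The sketch below only classifies the syntactically distinguishable cases; the function name, the sample hosts map and the output format are invented for illustration and are not part of the package.

```go
package main

import (
	"fmt"
	"net/netip"
	"strings"
)

// classifyAlias shows which branch of ExpandAlias a given alias string would hit.
func classifyAlias(alias string, hosts map[string]string) string {
	switch {
	case alias == "*":
		return "wildcard"
	case strings.HasPrefix(alias, "group:"):
		return "group"
	case strings.HasPrefix(alias, "tag:"):
		return "tag"
	case strings.HasPrefix(alias, "autogroup:"):
		return "autogroup"
	}
	// (the real ExpandAlias attempts user expansion here, before the hosts lookup)
	if _, ok := hosts[alias]; ok {
		return "host (resolved recursively)"
	}
	if _, err := netip.ParseAddr(alias); err == nil {
		return "single IP"
	}
	if _, err := netip.ParsePrefix(alias); err == nil {
		return "CIDR prefix"
	}
	return "user (or unknown alias)"
}

func main() {
	hosts := map[string]string{"backend": "10.0.0.5/32"}
	for _, a := range []string{"*", "group:admins", "tag:prod", "backend", "100.64.0.1", "10.0.0.0/8", "alice"} {
		fmt.Printf("%-14s -> %s\n", a, classifyAlias(a, hosts))
	}
}
```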
// excludeCorrectlyTaggedNodes will remove from the list of input nodes the ones
// that are correctly tagged, since they should not be listed as belonging to the user.
// We assume in this function that we only have nodes from 1 user.
//
// TODO(kradalby): It is quite hard to understand what this function is doing,
// it seems like it is trying to ensure that we don't include nodes that are tagged
// when we look up the nodes owned by a user.
// This should be refactored to be more clear as part of the Tags work in #1369.
func excludeCorrectlyTaggedNodes(
	aclPolicy *ACLPolicy,
	nodes types.Nodes,
	user string,
) types.Nodes {
	var out types.Nodes
	var tags []string
	for tag := range aclPolicy.TagOwners {
		owners, _ := expandOwnersFromTag(aclPolicy, user)
		ns := append(owners, user)
		if slices.Contains(ns, user) {
			tags = append(tags, tag)
		}
	}
	// for each node, if the tag is in the tags list, don't append it.
	for _, node := range nodes {
		found := false

		if node.Hostinfo != nil {
			for _, t := range node.Hostinfo.RequestTags {
				if slices.Contains(tags, t) {
					found = true

					break
				}
			}
		}

		if len(node.ForcedTags) > 0 {
			found = true
		}
		if !found {
			out = append(out, node)
		}
	}

	return out
}
func expandPorts(portsStr string, isWild bool) (*[]tailcfg.PortRange, error) {
	if isWildcard(portsStr) {
		return &[]tailcfg.PortRange{
			{First: portRangeBegin, Last: portRangeEnd},
		}, nil
	}

	if isWild {
		return nil, ErrWildcardIsNeeded
	}

	var ports []tailcfg.PortRange
	for _, portStr := range strings.Split(portsStr, ",") {
		log.Trace().Msgf("parsing portstring: %s", portStr)
		rang := strings.Split(portStr, "-")
		switch len(rang) {
		case 1:
			port, err := strconv.ParseUint(rang[0], util.Base10, util.BitSize16)
			if err != nil {
				return nil, err
			}
			ports = append(ports, tailcfg.PortRange{
				First: uint16(port),
				Last:  uint16(port),
			})

		case expectedTokenItems:
			start, err := strconv.ParseUint(rang[0], util.Base10, util.BitSize16)
			if err != nil {
				return nil, err
			}
			last, err := strconv.ParseUint(rang[1], util.Base10, util.BitSize16)
			if err != nil {
				return nil, err
			}
			ports = append(ports, tailcfg.PortRange{
				First: uint16(start),
				Last:  uint16(last),
			})

		default:
			return nil, ErrInvalidPortFormat
		}
	}

	return &ports, nil
}
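The accepted port grammar is a comma-separated list of single ports and "first-last" ranges, with "*" standing for the full range. A self-contained sketch of that grammar follows; it uses a local portRange struct instead of tailcfg.PortRange so it runs standalone, and the names are illustrative only.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

type portRange struct{ first, last uint16 }

// parsePorts accepts strings such as "*", "443" or "80,443,8000-8100".
func parsePorts(s string) ([]portRange, error) {
	if s == "*" {
		return []portRange{{0, 65535}}, nil
	}
	var out []portRange
	for _, part := range strings.Split(s, ",") {
		bounds := strings.Split(part, "-")
		switch len(bounds) {
		case 1:
			p, err := strconv.ParseUint(bounds[0], 10, 16)
			if err != nil {
				return nil, err
			}
			out = append(out, portRange{uint16(p), uint16(p)})
		case 2:
			first, err := strconv.ParseUint(bounds[0], 10, 16)
			if err != nil {
				return nil, err
			}
			last, err := strconv.ParseUint(bounds[1], 10, 16)
			if err != nil {
				return nil, err
			}
			out = append(out, portRange{uint16(first), uint16(last)})
		default:
			return nil, fmt.Errorf("invalid port format: %q", part)
		}
	}
	return out, nil
}

func main() {
	ranges, err := parsePorts("80,443,8000-8100")
	fmt.Println(ranges, err)
}
```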
// expandOwnersFromTag will return a list of users. An owner can be either a user or a group;
// a group cannot be composed of groups.
func expandOwnersFromTag(
	pol *ACLPolicy,
	tag string,
) ([]string, error) {
	noTagErr := fmt.Errorf(
		"%w. %v isn't owned by a TagOwner. Please add one first. https://tailscale.com/kb/1018/acls/#tag-owners",
		ErrInvalidTag,
		tag,
	)
	if pol == nil {
		return []string{}, noTagErr
	}
	var owners []string
	ows, ok := pol.TagOwners[tag]
	if !ok {
		return []string{}, noTagErr
	}
	for _, owner := range ows {
		if isGroup(owner) {
			gs, err := pol.expandUsersFromGroup(owner)
			if err != nil {
				return []string{}, err
			}
			owners = append(owners, gs...)
		} else {
			owners = append(owners, owner)
		}
	}

	return owners, nil
}

// expandUsersFromGroup will return the list of users inside the group
// after some validation.
func (pol *ACLPolicy) expandUsersFromGroup(
	group string,
) ([]string, error) {
	var users []string
	log.Trace().Caller().Interface("pol", pol).Msg("test")
	aclGroups, ok := pol.Groups[group]
	if !ok {
		return []string{}, fmt.Errorf(
			"group %v isn't registered. %w",
			group,
			ErrInvalidGroup,
		)
	}
	for _, group := range aclGroups {
		if isGroup(group) {
			return []string{}, fmt.Errorf(
				"%w. A group cannot be composed of groups. https://tailscale.com/kb/1018/acls/#groups",
				ErrInvalidGroup,
			)
		}
		users = append(users, group)
	}

	return users, nil
}
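Group expansion is flat by design: members must be plain usernames and nested groups are rejected. A short sketch of that behaviour follows; it is assumed to live in this package (alongside the tests), the function name is invented, and the group names are sample data.

```go
// demoGroupExpansion illustrates the flat-group rule enforced above.
func demoGroupExpansion() {
	pol := &ACLPolicy{
		Groups: Groups{
			"group:admins": {"alice", "bob"},
			"group:nested": {"group:admins"},
		},
	}

	users, err := pol.expandUsersFromGroup("group:admins")
	fmt.Println(users, err) // [alice bob] <nil>

	_, err = pol.expandUsersFromGroup("group:nested")
	fmt.Println(err != nil) // true: a group cannot be composed of groups
}
```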
func (pol *ACLPolicy) expandIPsFromGroup(
	group string,
	users []types.User,
	nodes types.Nodes,
) (*netipx.IPSet, error) {
	var build netipx.IPSetBuilder

	userTokens, err := pol.expandUsersFromGroup(group)
	if err != nil {
		return &netipx.IPSet{}, err
	}
	for _, user := range userTokens {
		filteredNodes := filterNodesByUser(nodes, users, user)
		for _, node := range filteredNodes {
			node.AppendToIPSet(&build)
		}
	}

	return build.IPSet()
}

func (pol *ACLPolicy) expandIPsFromTag(
	alias string,
	users []types.User,
	nodes types.Nodes,
) (*netipx.IPSet, error) {
	var build netipx.IPSetBuilder

	// check for forced tags
	for _, node := range nodes {
		if slices.Contains(node.ForcedTags, alias) {
			node.AppendToIPSet(&build)
		}
	}

	// find tag owners
	owners, err := expandOwnersFromTag(pol, alias)
	if err != nil {
		if errors.Is(err, ErrInvalidTag) {
			ipSet, _ := build.IPSet()
			if len(ipSet.Prefixes()) == 0 {
				return ipSet, fmt.Errorf(
					"%w. %v isn't owned by a TagOwner and no forced tags are defined",
					ErrInvalidTag,
					alias,
				)
			}

			return build.IPSet()
		} else {
			return nil, err
		}
	}

	// filter out nodes per tag owner
	for _, user := range owners {
		nodes := filterNodesByUser(nodes, users, user)
		for _, node := range nodes {
			if node.Hostinfo == nil {
				continue
			}

			if slices.Contains(node.Hostinfo.RequestTags, alias) {
				node.AppendToIPSet(&build)
			}
		}
	}

	return build.IPSet()
}
func (pol *ACLPolicy) expandIPsFromUser(
	user string,
	users []types.User,
	nodes types.Nodes,
) (*netipx.IPSet, error) {
	var build netipx.IPSetBuilder

	filteredNodes := filterNodesByUser(nodes, users, user)
	filteredNodes = excludeCorrectlyTaggedNodes(pol, filteredNodes, user)

	// short-circuit if we have no nodes to get ips from.
	if len(filteredNodes) == 0 {
		return nil, nil // nolint
	}

	for _, node := range filteredNodes {
		node.AppendToIPSet(&build)
	}

	return build.IPSet()
}

func (pol *ACLPolicy) expandIPsFromSingleIP(
	ip netip.Addr,
	nodes types.Nodes,
) (*netipx.IPSet, error) {
	log.Trace().Str("ip", ip.String()).Msg("ExpandAlias got ip")

	matches := nodes.FilterByIP(ip)

	var build netipx.IPSetBuilder
	build.Add(ip)

	for _, node := range matches {
		node.AppendToIPSet(&build)
	}

	return build.IPSet()
}

func (pol *ACLPolicy) expandIPsFromIPPrefix(
	prefix netip.Prefix,
	nodes types.Nodes,
) (*netipx.IPSet, error) {
	log.Trace().Str("prefix", prefix.String()).Msg("expandAlias got prefix")
	var build netipx.IPSetBuilder
	build.AddPrefix(prefix)

	// This is suboptimal and quite expensive, but if we only add the prefix, we will miss all the relevant IPv6
	// addresses for the hosts that belong to tailscale. This doesn't really affect stuff like subnet routers.
	for _, node := range nodes {
		for _, ip := range node.IPs() {
			// log.Trace().
			// 	Msgf("checking if node ip (%s) is part of prefix (%s): %v, is single ip prefix (%v), addr: %s", ip.String(), prefix.String(), prefix.Contains(ip), prefix.IsSingleIP(), prefix.Addr().String())
			if prefix.Contains(ip) {
				node.AppendToIPSet(&build)
			}
		}
	}

	return build.IPSet()
}
func expandAutoGroup(alias string) (*netipx.IPSet, error) {
	switch {
	case strings.HasPrefix(alias, "autogroup:internet"):
		return util.TheInternet(), nil

	default:
		return nil, fmt.Errorf("unknown autogroup %q", alias)
	}
}

func isWildcard(str string) bool {
	return str == "*"
}

func isGroup(str string) bool {
	return strings.HasPrefix(str, "group:")
}

func isTag(str string) bool {
	return strings.HasPrefix(str, "tag:")
}

func isAutoGroup(str string) bool {
	return strings.HasPrefix(str, "autogroup:")
}

// TagsOfNode will return the tags of the current node.
// Invalid tags are tags added by a user on a node, and that user doesn't have authority to add this tag.
// Valid tags are tags added by a user that is allowed in the ACL policy to add this tag.
func (pol *ACLPolicy) TagsOfNode(
	users []types.User,
	node *types.Node,
) ([]string, []string) {
	var validTags []string
	var invalidTags []string

	// TODO(kradalby): Why is this sometimes nil? coming from tailNode?
	if node == nil {
		return validTags, invalidTags
	}

	validTagMap := make(map[string]bool)
	invalidTagMap := make(map[string]bool)
	if node.Hostinfo != nil {
		for _, tag := range node.Hostinfo.RequestTags {
			owners, err := expandOwnersFromTag(pol, tag)
			if errors.Is(err, ErrInvalidTag) {
				invalidTagMap[tag] = true

				continue
			}
			var found bool
			for _, owner := range owners {
				user, err := findUserFromToken(users, owner)
				if err != nil {
					log.Trace().Caller().Err(err).Msg("could not determine user to filter tags by")
				}

				if node.User.ID == user.ID {
					found = true
				}
			}
			if found {
				validTagMap[tag] = true
			} else {
				invalidTagMap[tag] = true
			}
		}
		for tag := range invalidTagMap {
			invalidTags = append(invalidTags, tag)
		}
		for tag := range validTagMap {
			validTags = append(validTags, tag)
		}
	}

	return validTags, invalidTags
}
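A requested tag is "valid" only when one of its owners resolves to the node's own user. The sketch below illustrates that split; it is assumed to compile inside this package with its existing imports, the function name is invented, and the node literal (including the Hostinfo field type) follows the shapes used in the package's tests.

```go
// demoTagsOfNode: a node owned by "alice" requests two tags, only one of which
// has "alice" as a TagOwner.
func demoTagsOfNode() {
	pol := &ACLPolicy{TagOwners: TagOwners{"tag:web": {"alice"}}}
	node := &types.Node{
		User:     types.User{Name: "alice"},
		Hostinfo: &tailcfg.Hostinfo{RequestTags: []string{"tag:web", "tag:db"}},
	}

	valid, invalid := pol.TagsOfNode([]types.User{node.User}, node)
	fmt.Println(valid, invalid) // [tag:web] [tag:db]
}
```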
// filterNodesByUser returns a list of nodes that match the given userToken from a
// policy.
// Matching nodes are determined by first matching the user token to a user by checking:
// - If it is an ID that matches the user database ID
// - If it is the Provider Identifier from OIDC
// - If it matches the username or email of a user
//
// If the token matches more than one user, zero nodes will be returned.
func filterNodesByUser(nodes types.Nodes, users []types.User, userToken string) types.Nodes {
	var out types.Nodes

	user, err := findUserFromToken(users, userToken)
	if err != nil {
		log.Trace().Caller().Err(err).Msg("could not determine user to filter nodes by")
		return out
	}

	for _, node := range nodes {
		if node.User.ID == user.ID {
			out = append(out, node)
		}
	}

	return out
}

var (
	ErrorNoUserMatching       = errors.New("no user matching")
	ErrorMultipleUserMatching = errors.New("multiple users matching")
)

// findUserFromToken finds and returns a user based on the given token, prioritizing matches by ProviderIdentifier, followed by email or name.
// If no matching user is found, it returns an error of type ErrorNoUserMatching.
// If multiple users match the token, it returns an error of type ErrorMultipleUserMatching.
func findUserFromToken(users []types.User, token string) (types.User, error) {
	var potentialUsers []types.User

	// This adds the v2 support to looking up users with the new required
	// policyv2 format where usernames have @ at the end if they are not emails.
	token = strings.TrimSuffix(token, "@")

	for _, user := range users {
		if user.ProviderIdentifier.Valid && user.ProviderIdentifier.String == token {
			// Prioritize ProviderIdentifier match and exit early
			return user, nil
		}

		if user.Email == token || user.Name == token {
			potentialUsers = append(potentialUsers, user)
		}
	}

	if len(potentialUsers) == 0 {
		return types.User{}, fmt.Errorf("user with token %q not found: %w", token, ErrorNoUserMatching)
	}

	if len(potentialUsers) > 1 {
		return types.User{}, fmt.Errorf("multiple users with token %q found: %w", token, ErrorMultipleUserMatching)
	}

	return potentialUsers[0], nil
}
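Token matching therefore accepts several forms: an OIDC provider identifier (checked first), an email, or a bare username, with the v2 policy's trailing "@" stripped before comparison. A short sketch of those forms follows; it is assumed to live in this package, the function name and user data are invented, and the fmt/errors imports are taken from the package's existing import set.

```go
// demoFindUserFromToken shows the token forms accepted above.
func demoFindUserFromToken() {
	users := []types.User{
		{Name: "alice", Email: "alice@example.com"},
		{Name: "bob"},
	}

	u, _ := findUserFromToken(users, "alice@") // v2-style username with trailing "@"
	fmt.Println(u.Name)                        // alice

	u, _ = findUserFromToken(users, "alice@example.com") // email match
	fmt.Println(u.Email)                                 // alice@example.com

	_, err := findUserFromToken(users, "carol")
	fmt.Println(errors.Is(err, ErrorNoUserMatching)) // true
}
```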
2797
hscontrol/policy/v1/acls_test.go
Normal file
File diff suppressed because it is too large
123
hscontrol/policy/v1/acls_types.go
Normal file
@@ -0,0 +1,123 @@
package v1

import (
	"encoding/json"
	"net/netip"
	"strings"

	"github.com/tailscale/hujson"
)

// ACLPolicy represents a Tailscale ACL Policy.
type ACLPolicy struct {
	Groups        Groups        `json:"groups"`
	Hosts         Hosts         `json:"hosts"`
	TagOwners     TagOwners     `json:"tagOwners"`
	ACLs          []ACL         `json:"acls"`
	Tests         []ACLTest     `json:"tests"`
	AutoApprovers AutoApprovers `json:"autoApprovers"`
	SSHs          []SSH         `json:"ssh"`
}

// ACL is a basic rule for the ACL Policy.
type ACL struct {
	Action       string   `json:"action"`
	Protocol     string   `json:"proto"`
	Sources      []string `json:"src"`
	Destinations []string `json:"dst"`
}

// Groups references a series of aliases in the ACL rules.
type Groups map[string][]string

// Hosts are aliases for IP addresses or subnets.
type Hosts map[string]netip.Prefix

// TagOwners specify which users are allowed to use certain tags.
type TagOwners map[string][]string

// ACLTest is not implemented, but should be used to check if a certain rule is allowed.
type ACLTest struct {
	Source string   `json:"src"`
	Accept []string `json:"accept"`
	Deny   []string `json:"deny,omitempty"`
}

// AutoApprovers specify which users, groups or tags have their advertised routes
// or exit node status automatically enabled.
type AutoApprovers struct {
	Routes   map[string][]string `json:"routes"`
	ExitNode []string            `json:"exitNode"`
}

// SSH controls who can ssh into which machines.
type SSH struct {
	Action       string   `json:"action"`
	Sources      []string `json:"src"`
	Destinations []string `json:"dst"`
	Users        []string `json:"users"`
	CheckPeriod  string   `json:"checkPeriod,omitempty"`
}

// UnmarshalJSON allows to parse the Hosts directly into netip objects.
func (hosts *Hosts) UnmarshalJSON(data []byte) error {
	newHosts := Hosts{}
	hostIPPrefixMap := make(map[string]string)
	ast, err := hujson.Parse(data)
	if err != nil {
		return err
	}
	ast.Standardize()
	data = ast.Pack()
	err = json.Unmarshal(data, &hostIPPrefixMap)
	if err != nil {
		return err
	}
	for host, prefixStr := range hostIPPrefixMap {
		if !strings.Contains(prefixStr, "/") {
			prefixStr += "/32"
		}
		prefix, err := netip.ParsePrefix(prefixStr)
		if err != nil {
			return err
		}
		newHosts[host] = prefix
	}
	*hosts = newHosts

	return nil
}

// IsZero is perhaps a bit naive here.
func (pol ACLPolicy) IsZero() bool {
	if len(pol.Groups) == 0 && len(pol.Hosts) == 0 && len(pol.ACLs) == 0 && len(pol.SSHs) == 0 {
		return true
	}

	return false
}

// GetRouteApprovers returns the list of autoApproving users, groups or tags for a given IPPrefix.
func (autoApprovers *AutoApprovers) GetRouteApprovers(
	prefix netip.Prefix,
) ([]string, error) {
	if prefix.Bits() == 0 {
		return autoApprovers.ExitNode, nil // 0.0.0.0/0, ::/0 or equivalent
	}

	approverAliases := make([]string, 0)

	for autoApprovedPrefix, autoApproverAliases := range autoApprovers.Routes {
		autoApprovedPrefix, err := netip.ParsePrefix(autoApprovedPrefix)
		if err != nil {
			return nil, err
		}

		if prefix.Bits() >= autoApprovedPrefix.Bits() &&
			autoApprovedPrefix.Contains(prefix.Masked().Addr()) {
			approverAliases = append(approverAliases, autoApproverAliases...)
		}
	}

	return approverAliases, nil
}
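The Hosts unmarshalling above relies on HuJSON standardization (comments and trailing commas are stripped) and appends "/32" to bare IPv4 addresses. A self-contained sketch of that pipeline, using the same hujson API but with invented sample data:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/netip"
	"strings"

	"github.com/tailscale/hujson"
)

func main() {
	raw := []byte(`{
		// comments and trailing commas are allowed in HuJSON
		"host-1": "100.64.0.1",
		"subnet-1": "100.100.101.0/24",
	}`)

	// Standardize turns HuJSON into plain JSON before unmarshalling.
	ast, err := hujson.Parse(raw)
	if err != nil {
		panic(err)
	}
	ast.Standardize()

	var m map[string]string
	if err := json.Unmarshal(ast.Pack(), &m); err != nil {
		panic(err)
	}

	for host, p := range m {
		if !strings.Contains(p, "/") {
			p += "/32" // bare IPs become single-host prefixes
		}
		prefix, err := netip.ParsePrefix(p)
		fmt.Println(host, prefix, err)
	}
}
```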
188
hscontrol/policy/v1/policy.go
Normal file
@@ -0,0 +1,188 @@
package v1

import (
	"fmt"
	"github.com/juanfont/headscale/hscontrol/policy/matcher"
	"io"
	"net/netip"
	"os"
	"sync"

	"slices"

	"github.com/juanfont/headscale/hscontrol/types"
	"github.com/rs/zerolog/log"
	"tailscale.com/tailcfg"
	"tailscale.com/util/deephash"
)

func NewPolicyManagerFromPath(path string, users []types.User, nodes types.Nodes) (*PolicyManager, error) {
	policyFile, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer policyFile.Close()

	policyBytes, err := io.ReadAll(policyFile)
	if err != nil {
		return nil, err
	}

	return NewPolicyManager(policyBytes, users, nodes)
}

func NewPolicyManager(polB []byte, users []types.User, nodes types.Nodes) (*PolicyManager, error) {
	var pol *ACLPolicy
	var err error
	if polB != nil && len(polB) > 0 {
		pol, err = LoadACLPolicyFromBytes(polB)
		if err != nil {
			return nil, fmt.Errorf("parsing policy: %w", err)
		}
	}

	pm := PolicyManager{
		pol:   pol,
		users: users,
		nodes: nodes,
	}

	_, err = pm.updateLocked()
	if err != nil {
		return nil, err
	}

	return &pm, nil
}

type PolicyManager struct {
	mu      sync.Mutex
	pol     *ACLPolicy
	polHash deephash.Sum

	users []types.User
	nodes types.Nodes

	filter     []tailcfg.FilterRule
	filterHash deephash.Sum
}

// updateLocked updates the filter rules based on the current policy and nodes.
// It must be called with the lock held.
func (pm *PolicyManager) updateLocked() (bool, error) {
	filter, err := pm.pol.CompileFilterRules(pm.users, pm.nodes)
	if err != nil {
		return false, fmt.Errorf("compiling filter rules: %w", err)
	}

	polHash := deephash.Hash(pm.pol)
	filterHash := deephash.Hash(&filter)

	if polHash == pm.polHash && filterHash == pm.filterHash {
		return false, nil
	}

	pm.filter = filter
	pm.filterHash = filterHash
	pm.polHash = polHash

	return true, nil
}

func (pm *PolicyManager) Filter() ([]tailcfg.FilterRule, []matcher.Match) {
	pm.mu.Lock()
	defer pm.mu.Unlock()
	return pm.filter, matcher.MatchesFromFilterRules(pm.filter)
}

func (pm *PolicyManager) SSHPolicy(node *types.Node) (*tailcfg.SSHPolicy, error) {
	pm.mu.Lock()
	defer pm.mu.Unlock()

	return pm.pol.CompileSSHPolicy(node, pm.users, pm.nodes)
}

func (pm *PolicyManager) SetPolicy(polB []byte) (bool, error) {
	if len(polB) == 0 {
		return false, nil
	}

	pol, err := LoadACLPolicyFromBytes(polB)
	if err != nil {
		return false, fmt.Errorf("parsing policy: %w", err)
	}

	pm.mu.Lock()
	defer pm.mu.Unlock()

	pm.pol = pol

	return pm.updateLocked()
}

// SetUsers updates the users in the policy manager and updates the filter rules.
func (pm *PolicyManager) SetUsers(users []types.User) (bool, error) {
	pm.mu.Lock()
	defer pm.mu.Unlock()

	pm.users = users
	return pm.updateLocked()
}

// SetNodes updates the nodes in the policy manager and updates the filter rules.
func (pm *PolicyManager) SetNodes(nodes types.Nodes) (bool, error) {
	pm.mu.Lock()
	defer pm.mu.Unlock()
	pm.nodes = nodes
	return pm.updateLocked()
}

func (pm *PolicyManager) NodeCanHaveTag(node *types.Node, tag string) bool {
	if pm == nil || pm.pol == nil {
		return false
	}

	pm.mu.Lock()
	defer pm.mu.Unlock()

	tags, invalid := pm.pol.TagsOfNode(pm.users, node)
	log.Debug().Strs("authorised_tags", tags).Strs("unauthorised_tags", invalid).Uint64("node.id", node.ID.Uint64()).Msg("tags provided by policy")

	return slices.Contains(tags, tag)
}

func (pm *PolicyManager) NodeCanApproveRoute(node *types.Node, route netip.Prefix) bool {
	if pm == nil || pm.pol == nil {
		return false
	}

	pm.mu.Lock()
	defer pm.mu.Unlock()

	approvers, _ := pm.pol.AutoApprovers.GetRouteApprovers(route)

	for _, approvedAlias := range approvers {
		if approvedAlias == node.User.Username() {
			return true
		} else {
			ips, err := pm.pol.ExpandAlias(pm.nodes, pm.users, approvedAlias)
			if err != nil {
				return false
			}

			// approvedIPs should contain all of node's IPs if it matches the rule, so check for first
			if ips != nil && ips.Contains(*node.IPv4) {
				return true
			}
		}
	}
	return false
}

func (pm *PolicyManager) Version() int {
	return 1
}

func (pm *PolicyManager) DebugString() string {
	return "not implemented for v1"
}
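The boolean returned by the setters reports whether the compiled filter actually changed, which is what callers use to decide whether connected nodes need an updated netmap. A hypothetical usage sketch follows; it is assumed to compile inside this package, the inline policy string is sample data, and the iap helper is borrowed from the package's tests.

```go
// demoPolicyManager sketches the SetNodes / Filter flow described above.
func demoPolicyManager() {
	users := []types.User{{Name: "alice"}}

	pm, err := NewPolicyManager(
		[]byte(`{"acls":[{"action":"accept","src":["*"],"dst":["*:*"]}]}`),
		users,
		types.Nodes{},
	)
	if err != nil {
		panic(err)
	}

	// changed is true only if the recompiled filter differs from the previous one.
	changed, err := pm.SetNodes(types.Nodes{{IPv4: iap("100.64.0.1"), User: users[0]}})
	if err != nil {
		panic(err)
	}
	fmt.Println("filter changed:", changed)

	rules, matchers := pm.Filter()
	fmt.Println(len(rules), len(matchers))
}
```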
180
hscontrol/policy/v1/policy_test.go
Normal file
@@ -0,0 +1,180 @@
package v1

import (
	"github.com/juanfont/headscale/hscontrol/policy/matcher"
	"testing"

	"github.com/google/go-cmp/cmp"
	"github.com/juanfont/headscale/hscontrol/types"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"gorm.io/gorm"
	"tailscale.com/tailcfg"
)

func TestPolicySetChange(t *testing.T) {
	users := []types.User{
		{
			Model: gorm.Model{ID: 1},
			Name:  "testuser",
		},
	}
	tests := []struct {
		name             string
		users            []types.User
		nodes            types.Nodes
		policy           []byte
		wantUsersChange  bool
		wantNodesChange  bool
		wantPolicyChange bool
		wantFilter       []tailcfg.FilterRule
		wantMatchers     []matcher.Match
	}{
		{
			name: "set-nodes",
			nodes: types.Nodes{
				{
					IPv4: iap("100.64.0.2"),
					User: users[0],
				},
			},
			wantNodesChange: false,
			wantFilter: []tailcfg.FilterRule{
				{
					DstPorts: []tailcfg.NetPortRange{{IP: "100.64.0.1/32", Ports: tailcfg.PortRangeAny}},
				},
			},
			wantMatchers: []matcher.Match{
				matcher.MatchFromStrings([]string{}, []string{"100.64.0.1/32"}),
			},
		},
		{
			name:            "set-users",
			users:           users,
			wantUsersChange: false,
			wantFilter: []tailcfg.FilterRule{
				{
					DstPorts: []tailcfg.NetPortRange{{IP: "100.64.0.1/32", Ports: tailcfg.PortRangeAny}},
				},
			},
			wantMatchers: []matcher.Match{
				matcher.MatchFromStrings([]string{}, []string{"100.64.0.1/32"}),
			},
		},
		{
			name:  "set-users-and-node",
			users: users,
			nodes: types.Nodes{
				{
					IPv4: iap("100.64.0.2"),
					User: users[0],
				},
			},
			wantUsersChange: false,
			wantNodesChange: true,
			wantFilter: []tailcfg.FilterRule{
				{
					SrcIPs:   []string{"100.64.0.2/32"},
					DstPorts: []tailcfg.NetPortRange{{IP: "100.64.0.1/32", Ports: tailcfg.PortRangeAny}},
				},
			},
			wantMatchers: []matcher.Match{
				matcher.MatchFromStrings([]string{"100.64.0.2/32"}, []string{"100.64.0.1/32"}),
			},
		},
		{
			name: "set-policy",
			policy: []byte(`
{
	"acls": [
		{
			"action": "accept",
			"src": [
				"100.64.0.61",
			],
			"dst": [
				"100.64.0.62:*",
			],
		},
	],
}
`),
			wantPolicyChange: true,
			wantFilter: []tailcfg.FilterRule{
				{
					SrcIPs:   []string{"100.64.0.61/32"},
					DstPorts: []tailcfg.NetPortRange{{IP: "100.64.0.62/32", Ports: tailcfg.PortRangeAny}},
				},
			},
			wantMatchers: []matcher.Match{
				matcher.MatchFromStrings([]string{"100.64.0.61/32"}, []string{"100.64.0.62/32"}),
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			pol := `
{
	"groups": {
		"group:example": [
			"testuser",
		],
	},

	"hosts": {
		"host-1": "100.64.0.1",
		"subnet-1": "100.100.101.100/24",
	},

	"acls": [
		{
			"action": "accept",
			"src": [
				"group:example",
			],
			"dst": [
				"host-1:*",
			],
		},
	],
}
`
			pm, err := NewPolicyManager([]byte(pol), []types.User{}, types.Nodes{})
			require.NoError(t, err)

			if tt.policy != nil {
				change, err := pm.SetPolicy(tt.policy)
				require.NoError(t, err)

				assert.Equal(t, tt.wantPolicyChange, change)
			}

			if tt.users != nil {
				change, err := pm.SetUsers(tt.users)
				require.NoError(t, err)

				assert.Equal(t, tt.wantUsersChange, change)
			}

			if tt.nodes != nil {
				change, err := pm.SetNodes(tt.nodes)
				require.NoError(t, err)

				assert.Equal(t, tt.wantNodesChange, change)
			}

			filter, matchers := pm.Filter()
			if diff := cmp.Diff(tt.wantFilter, filter); diff != "" {
				t.Errorf("TestPolicySetChange() unexpected filter (-want +got):\n%s", diff)
			}
			if diff := cmp.Diff(
				tt.wantMatchers,
				matchers,
				cmp.AllowUnexported(matcher.Match{}),
			); diff != "" {
				t.Errorf("TestPolicySetChange() unexpected matchers (-want +got):\n%s", diff)
			}
		})
	}
}
|
@ -33,60 +33,6 @@ func (a Asterix) String() string {
|
||||||
return "*"
|
return "*"
|
||||||
}
|
}
|
||||||
|
|
||||||
// MarshalJSON marshals the Asterix to JSON.
|
|
||||||
func (a Asterix) MarshalJSON() ([]byte, error) {
|
|
||||||
return []byte(`"*"`), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// MarshalJSON marshals the AliasWithPorts to JSON.
|
|
||||||
func (a AliasWithPorts) MarshalJSON() ([]byte, error) {
|
|
||||||
if a.Alias == nil {
|
|
||||||
return []byte(`""`), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var alias string
|
|
||||||
switch v := a.Alias.(type) {
|
|
||||||
case *Username:
|
|
||||||
alias = string(*v)
|
|
||||||
case *Group:
|
|
||||||
alias = string(*v)
|
|
||||||
case *Tag:
|
|
||||||
alias = string(*v)
|
|
||||||
case *Host:
|
|
||||||
alias = string(*v)
|
|
||||||
case *Prefix:
|
|
||||||
alias = v.String()
|
|
||||||
case *AutoGroup:
|
|
||||||
alias = string(*v)
|
|
||||||
case Asterix:
|
|
||||||
alias = "*"
|
|
||||||
default:
|
|
||||||
return nil, fmt.Errorf("unknown alias type: %T", v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// If no ports are specified
|
|
||||||
if len(a.Ports) == 0 {
|
|
||||||
return json.Marshal(alias)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if it's the wildcard port range
|
|
||||||
if len(a.Ports) == 1 && a.Ports[0].First == 0 && a.Ports[0].Last == 65535 {
|
|
||||||
return json.Marshal(fmt.Sprintf("%s:*", alias))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Otherwise, format as "alias:ports"
|
|
||||||
var ports []string
|
|
||||||
for _, port := range a.Ports {
|
|
||||||
if port.First == port.Last {
|
|
||||||
ports = append(ports, fmt.Sprintf("%d", port.First))
|
|
||||||
} else {
|
|
||||||
ports = append(ports, fmt.Sprintf("%d-%d", port.First, port.Last))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return json.Marshal(fmt.Sprintf("%s:%s", alias, strings.Join(ports, ",")))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a Asterix) UnmarshalJSON(b []byte) error {
|
func (a Asterix) UnmarshalJSON(b []byte) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -117,16 +63,6 @@ func (u *Username) String() string {
|
||||||
return string(*u)
|
return string(*u)
|
||||||
}
|
}
|
||||||
|
|
||||||
// MarshalJSON marshals the Username to JSON.
|
|
||||||
func (u Username) MarshalJSON() ([]byte, error) {
|
|
||||||
return json.Marshal(string(u))
|
|
||||||
}
|
|
||||||
|
|
||||||
// MarshalJSON marshals the Prefix to JSON.
|
|
||||||
func (p Prefix) MarshalJSON() ([]byte, error) {
|
|
||||||
return json.Marshal(p.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (u *Username) UnmarshalJSON(b []byte) error {
|
func (u *Username) UnmarshalJSON(b []byte) error {
|
||||||
*u = Username(strings.Trim(string(b), `"`))
|
*u = Username(strings.Trim(string(b), `"`))
|
||||||
if err := u.Validate(); err != nil {
|
if err := u.Validate(); err != nil {
|
||||||
|
@ -227,25 +163,10 @@ func (g Group) CanBeAutoApprover() bool {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
// String returns the string representation of the Group.
|
|
||||||
func (g Group) String() string {
|
func (g Group) String() string {
|
||||||
return string(g)
|
return string(g)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h Host) String() string {
|
|
||||||
return string(h)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MarshalJSON marshals the Host to JSON.
|
|
||||||
func (h Host) MarshalJSON() ([]byte, error) {
|
|
||||||
return json.Marshal(string(h))
|
|
||||||
}
|
|
||||||
|
|
||||||
// MarshalJSON marshals the Group to JSON.
|
|
||||||
func (g Group) MarshalJSON() ([]byte, error) {
|
|
||||||
return json.Marshal(string(g))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g Group) Resolve(p *Policy, users types.Users, nodes types.Nodes) (*netipx.IPSet, error) {
|
func (g Group) Resolve(p *Policy, users types.Users, nodes types.Nodes) (*netipx.IPSet, error) {
|
||||||
var ips netipx.IPSetBuilder
|
var ips netipx.IPSetBuilder
|
||||||
var errs []error
|
var errs []error
|
||||||
|
@ -323,11 +244,6 @@ func (t Tag) String() string {
|
||||||
return string(t)
|
return string(t)
|
||||||
}
|
}
|
||||||
|
|
||||||
// MarshalJSON marshals the Tag to JSON.
|
|
||||||
func (t Tag) MarshalJSON() ([]byte, error) {
|
|
||||||
return json.Marshal(string(t))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Host is a string that represents a hostname.
|
// Host is a string that represents a hostname.
|
||||||
type Host string
|
type Host string
|
||||||
|
|
||||||
|
@ -363,7 +279,7 @@ func (h Host) Resolve(p *Policy, _ types.Users, nodes types.Nodes) (*netipx.IPSe
|
||||||
|
|
||||||
// If the IP is a single host, look for a node to ensure we add all the IPs of
|
// If the IP is a single host, look for a node to ensure we add all the IPs of
|
||||||
// the node to the IPSet.
|
// the node to the IPSet.
|
||||||
appendIfNodeHasIP(nodes, &ips, netip.Prefix(pref))
|
// appendIfNodeHasIP(nodes, &ips, pref)
|
||||||
|
|
||||||
// TODO(kradalby): I am a bit unsure what is the correct way to do this,
|
// TODO(kradalby): I am a bit unsure what is the correct way to do this,
|
||||||
// should a host with a non single IP be able to resolve the full host (inc all IPs).
|
// should a host with a non single IP be able to resolve the full host (inc all IPs).
|
||||||
|
@ -439,23 +355,28 @@ func (p Prefix) Resolve(_ *Policy, _ types.Users, nodes types.Nodes) (*netipx.IP
|
||||||
ips.AddPrefix(netip.Prefix(p))
|
ips.AddPrefix(netip.Prefix(p))
|
||||||
// If the IP is a single host, look for a node to ensure we add all the IPs of
|
// If the IP is a single host, look for a node to ensure we add all the IPs of
|
||||||
// the node to the IPSet.
|
// the node to the IPSet.
|
||||||
appendIfNodeHasIP(nodes, &ips, netip.Prefix(p))
|
// appendIfNodeHasIP(nodes, &ips, pref)
|
||||||
|
|
||||||
return buildIPSetMultiErr(&ips, errs)
|
// TODO(kradalby): I am a bit unsure what is the correct way to do this,
|
||||||
}
|
// should a host with a non single IP be able to resolve the full host (inc all IPs).
|
||||||
|
// Currently this is done because the old implementation did this, we might want to
|
||||||
// appendIfNodeHasIP appends the IPs of the nodes to the IPSet if the node has the
|
// drop it before releasing.
|
||||||
// IP address in the prefix.
|
// For example:
|
||||||
func appendIfNodeHasIP(nodes types.Nodes, ips *netipx.IPSetBuilder, pref netip.Prefix) {
|
// If a src or dst includes "64.0.0.0/2:*", it will include 100.64/16 range, which
|
||||||
if !pref.IsSingleIP() && !tsaddr.IsTailscaleIP(pref.Addr()) {
|
// means that it will need to fetch the IPv6 addrs of the node to include the full range.
|
||||||
return
|
// Clearly, if a user sets the dst to be "64.0.0.0/2:*", it is likely more of a exit node
|
||||||
|
// and this would be strange behaviour.
|
||||||
|
ipsTemp, err := ips.IPSet()
|
||||||
|
if err != nil {
|
||||||
|
errs = append(errs, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, node := range nodes {
|
for _, node := range nodes {
|
||||||
if node.HasIP(pref.Addr()) {
|
if node.InIPSet(ipsTemp) {
|
||||||
node.AppendToIPSet(ips)
|
node.AppendToIPSet(&ips)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return buildIPSetMultiErr(&ips, errs)
|
||||||
}
|
}
|
||||||
|
|
||||||
// AutoGroup is a special string which is always prefixed with `autogroup:`
|
// AutoGroup is a special string which is always prefixed with `autogroup:`
|
||||||
|
@ -463,20 +384,15 @@ type AutoGroup string
|
||||||
|
|
||||||
const (
|
const (
|
||||||
AutoGroupInternet AutoGroup = "autogroup:internet"
|
AutoGroupInternet AutoGroup = "autogroup:internet"
|
||||||
AutoGroupMember AutoGroup = "autogroup:member"
|
|
||||||
AutoGroupNonRoot AutoGroup = "autogroup:nonroot"
|
AutoGroupNonRoot AutoGroup = "autogroup:nonroot"
|
||||||
AutoGroupTagged AutoGroup = "autogroup:tagged"
|
|
||||||
|
|
||||||
// These are not yet implemented.
|
// These are not yet implemented.
|
||||||
AutoGroupSelf AutoGroup = "autogroup:self"
|
AutoGroupSelf AutoGroup = "autogroup:self"
|
||||||
|
AutoGroupMember AutoGroup = "autogroup:member"
|
||||||
|
AutoGroupTagged AutoGroup = "autogroup:tagged"
|
||||||
)
|
)
|
||||||
|
|
||||||
var autogroups = []AutoGroup{
|
var autogroups = []AutoGroup{AutoGroupInternet}
|
||||||
AutoGroupInternet,
|
|
||||||
AutoGroupMember,
|
|
||||||
AutoGroupNonRoot,
|
|
||||||
AutoGroupTagged,
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ag AutoGroup) Validate() error {
|
func (ag AutoGroup) Validate() error {
|
||||||
if slices.Contains(autogroups, ag) {
|
if slices.Contains(autogroups, ag) {
|
||||||
|
@ -494,81 +410,13 @@ func (ag *AutoGroup) UnmarshalJSON(b []byte) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// MarshalJSON marshals the AutoGroup to JSON.
|
func (ag AutoGroup) Resolve(_ *Policy, _ types.Users, _ types.Nodes) (*netipx.IPSet, error) {
|
||||||
func (ag AutoGroup) MarshalJSON() ([]byte, error) {
|
|
||||||
return json.Marshal(string(ag))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ag AutoGroup) Resolve(p *Policy, users types.Users, nodes types.Nodes) (*netipx.IPSet, error) {
|
|
||||||
var build netipx.IPSetBuilder
|
|
||||||
|
|
||||||
switch ag {
|
switch ag {
|
||||||
case AutoGroupInternet:
|
case AutoGroupInternet:
|
||||||
return util.TheInternet(), nil
|
return util.TheInternet(), nil
|
||||||
|
|
||||||
case AutoGroupMember:
|
|
||||||
// autogroup:member represents all untagged devices in the tailnet.
|
|
||||||
tagMap, err := resolveTagOwners(p, users, nodes)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, node := range nodes {
|
|
||||||
// Skip if node has forced tags
|
|
||||||
if len(node.ForcedTags) != 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Skip if node has any allowed requested tags
|
|
||||||
hasAllowedTag := false
|
|
||||||
if node.Hostinfo != nil && len(node.Hostinfo.RequestTags) != 0 {
|
|
||||||
for _, tag := range node.Hostinfo.RequestTags {
|
|
||||||
if tagips, ok := tagMap[Tag(tag)]; ok && node.InIPSet(tagips) {
|
|
||||||
hasAllowedTag = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if hasAllowedTag {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Node is a member if it has no forced tags and no allowed requested tags
|
|
||||||
node.AppendToIPSet(&build)
|
|
||||||
}
|
|
||||||
|
|
||||||
return build.IPSet()
|
|
||||||
|
|
||||||
case AutoGroupTagged:
|
|
||||||
// autogroup:tagged represents all devices with a tag in the tailnet.
|
|
||||||
tagMap, err := resolveTagOwners(p, users, nodes)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, node := range nodes {
|
|
||||||
// Include if node has forced tags
|
|
||||||
if len(node.ForcedTags) != 0 {
|
|
||||||
node.AppendToIPSet(&build)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Include if node has any allowed requested tags
|
|
||||||
if node.Hostinfo != nil && len(node.Hostinfo.RequestTags) != 0 {
|
|
||||||
for _, tag := range node.Hostinfo.RequestTags {
|
|
||||||
if _, ok := tagMap[Tag(tag)]; ok {
|
|
||||||
node.AppendToIPSet(&build)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return build.IPSet()
|
|
||||||
|
|
||||||
default:
|
|
||||||
return nil, fmt.Errorf("unknown autogroup %q", ag)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ag *AutoGroup) Is(c AutoGroup) bool {
|
func (ag *AutoGroup) Is(c AutoGroup) bool {
|
||||||
|
@ -728,37 +576,6 @@ func (a *Aliases) UnmarshalJSON(b []byte) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// MarshalJSON marshals the Aliases to JSON.
|
|
||||||
func (a Aliases) MarshalJSON() ([]byte, error) {
|
|
||||||
if a == nil {
|
|
||||||
return []byte("[]"), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
aliases := make([]string, len(a))
|
|
||||||
for i, alias := range a {
|
|
||||||
switch v := alias.(type) {
|
|
||||||
case *Username:
|
|
||||||
aliases[i] = string(*v)
|
|
||||||
case *Group:
|
|
||||||
aliases[i] = string(*v)
|
|
||||||
case *Tag:
|
|
||||||
aliases[i] = string(*v)
|
|
||||||
case *Host:
|
|
||||||
aliases[i] = string(*v)
|
|
||||||
case *Prefix:
|
|
||||||
aliases[i] = v.String()
|
|
||||||
case *AutoGroup:
|
|
||||||
aliases[i] = string(*v)
|
|
||||||
case Asterix:
|
|
||||||
aliases[i] = "*"
|
|
||||||
default:
|
|
||||||
return nil, fmt.Errorf("unknown alias type: %T", v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return json.Marshal(aliases)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a Aliases) Resolve(p *Policy, users types.Users, nodes types.Nodes) (*netipx.IPSet, error) {
|
func (a Aliases) Resolve(p *Policy, users types.Users, nodes types.Nodes) (*netipx.IPSet, error) {
|
||||||
var ips netipx.IPSetBuilder
|
var ips netipx.IPSetBuilder
|
||||||
var errs []error
|
var errs []error
|
||||||
|
@ -817,29 +634,6 @@ func (aa *AutoApprovers) UnmarshalJSON(b []byte) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// MarshalJSON marshals the AutoApprovers to JSON.
|
|
||||||
func (aa AutoApprovers) MarshalJSON() ([]byte, error) {
|
|
||||||
if aa == nil {
|
|
||||||
return []byte("[]"), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
approvers := make([]string, len(aa))
|
|
||||||
for i, approver := range aa {
|
|
||||||
switch v := approver.(type) {
|
|
||||||
case *Username:
|
|
||||||
approvers[i] = string(*v)
|
|
||||||
case *Tag:
|
|
||||||
approvers[i] = string(*v)
|
|
||||||
case *Group:
|
|
||||||
approvers[i] = string(*v)
|
|
||||||
default:
|
|
||||||
return nil, fmt.Errorf("unknown auto approver type: %T", v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return json.Marshal(approvers)
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseAutoApprover(s string) (AutoApprover, error) {
|
func parseAutoApprover(s string) (AutoApprover, error) {
|
||||||
switch {
|
switch {
|
||||||
case isUser(s):
|
case isUser(s):
|
||||||
|
@ -909,27 +703,6 @@ func (o *Owners) UnmarshalJSON(b []byte) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// MarshalJSON marshals the Owners to JSON.
|
|
||||||
func (o Owners) MarshalJSON() ([]byte, error) {
|
|
||||||
if o == nil {
|
|
||||||
return []byte("[]"), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
owners := make([]string, len(o))
|
|
||||||
for i, owner := range o {
|
|
||||||
switch v := owner.(type) {
|
|
||||||
case *Username:
|
|
||||||
owners[i] = string(*v)
|
|
||||||
case *Group:
|
|
||||||
owners[i] = string(*v)
|
|
||||||
default:
|
|
||||||
return nil, fmt.Errorf("unknown owner type: %T", v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return json.Marshal(owners)
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseOwner(s string) (Owner, error) {
|
func parseOwner(s string) (Owner, error) {
|
||||||
switch {
|
switch {
|
||||||
case isUser(s):
|
case isUser(s):
|
||||||
|
@ -1016,64 +789,22 @@ func (h *Hosts) UnmarshalJSON(b []byte) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
var prefix Prefix
|
var pref Prefix
|
||||||
if err := prefix.parseString(value); err != nil {
|
err := pref.parseString(value)
|
||||||
return fmt.Errorf(`Hostname "%s" contains an invalid IP address: "%s"`, key, value)
|
if err != nil {
|
||||||
|
return fmt.Errorf("Hostname %q contains an invalid IP address: %q", key, value)
|
||||||
}
|
}
|
||||||
|
|
||||||
(*h)[host] = prefix
|
(*h)[host] = pref
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// MarshalJSON marshals the Hosts to JSON.
|
|
||||||
func (h Hosts) MarshalJSON() ([]byte, error) {
|
|
||||||
if h == nil {
|
|
||||||
return []byte("{}"), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
rawHosts := make(map[string]string)
|
|
||||||
for host, prefix := range h {
|
|
||||||
rawHosts[string(host)] = prefix.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
return json.Marshal(rawHosts)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h Hosts) exist(name Host) bool {
|
func (h Hosts) exist(name Host) bool {
|
||||||
_, ok := h[name]
|
_, ok := h[name]
|
||||||
return ok
|
return ok
|
||||||
}
|
}
|
||||||
|
|
||||||
// MarshalJSON marshals the TagOwners to JSON.
|
|
||||||
func (to TagOwners) MarshalJSON() ([]byte, error) {
|
|
||||||
if to == nil {
|
|
||||||
return []byte("{}"), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
rawTagOwners := make(map[string][]string)
|
|
||||||
for tag, owners := range to {
|
|
||||||
tagStr := string(tag)
|
|
||||||
ownerStrs := make([]string, len(owners))
|
|
||||||
|
|
||||||
for i, owner := range owners {
|
|
||||||
switch v := owner.(type) {
|
|
||||||
case *Username:
|
|
||||||
ownerStrs[i] = string(*v)
|
|
||||||
case *Group:
|
|
||||||
ownerStrs[i] = string(*v)
|
|
||||||
default:
|
|
||||||
return nil, fmt.Errorf("unknown owner type: %T", v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
rawTagOwners[tagStr] = ownerStrs
|
|
||||||
}
|
|
||||||
|
|
||||||
return json.Marshal(rawTagOwners)
|
|
||||||
}
|
|
||||||
|
|
||||||
// TagOwners are a map of Tag to a list of the UserEntities that own the tag.
|
// TagOwners are a map of Tag to a list of the UserEntities that own the tag.
|
||||||
type TagOwners map[Tag]Owners
|
type TagOwners map[Tag]Owners
|
||||||
|
|
||||||
|
@ -1127,32 +858,8 @@ func resolveTagOwners(p *Policy, users types.Users, nodes types.Nodes) (map[Tag]
|
||||||
}
|
}
|
||||||
|
|
||||||
type AutoApproverPolicy struct {
|
type AutoApproverPolicy struct {
|
||||||
Routes map[netip.Prefix]AutoApprovers `json:"routes,omitempty"`
|
Routes map[netip.Prefix]AutoApprovers `json:"routes"`
|
||||||
ExitNode AutoApprovers `json:"exitNode,omitempty"`
|
ExitNode AutoApprovers `json:"exitNode"`
|
||||||
}
|
|
||||||
|
|
||||||
// MarshalJSON marshals the AutoApproverPolicy to JSON.
|
|
||||||
func (ap AutoApproverPolicy) MarshalJSON() ([]byte, error) {
|
|
||||||
// Marshal empty policies as empty object
|
|
||||||
if ap.Routes == nil && ap.ExitNode == nil {
|
|
||||||
return []byte("{}"), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type Alias AutoApproverPolicy
|
|
||||||
|
|
||||||
// Create a new object to avoid marshalling nil slices as null instead of empty arrays
|
|
||||||
obj := Alias(ap)
|
|
||||||
|
|
||||||
// Initialize empty maps/slices to ensure they're marshalled as empty objects/arrays instead of null
|
|
||||||
if obj.Routes == nil {
|
|
||||||
obj.Routes = make(map[netip.Prefix]AutoApprovers)
|
|
||||||
}
|
|
||||||
|
|
||||||
if obj.ExitNode == nil {
|
|
||||||
obj.ExitNode = AutoApprovers{}
|
|
||||||
}
|
|
||||||
|
|
||||||
return json.Marshal(&obj)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// resolveAutoApprovers resolves the AutoApprovers to a map of netip.Prefix to netipx.IPSet.
|
// resolveAutoApprovers resolves the AutoApprovers to a map of netip.Prefix to netipx.IPSet.
|
||||||
|
@ -1236,25 +943,21 @@ type Policy struct {
|
||||||
// callers using it should panic if not
|
// callers using it should panic if not
|
||||||
validated bool `json:"-"`
|
validated bool `json:"-"`
|
||||||
|
|
||||||
Groups Groups `json:"groups,omitempty"`
|
Groups Groups `json:"groups"`
|
||||||
Hosts Hosts `json:"hosts,omitempty"`
|
Hosts Hosts `json:"hosts"`
|
||||||
TagOwners TagOwners `json:"tagOwners,omitempty"`
|
TagOwners TagOwners `json:"tagOwners"`
|
||||||
ACLs []ACL `json:"acls,omitempty"`
|
ACLs []ACL `json:"acls"`
|
||||||
AutoApprovers AutoApproverPolicy `json:"autoApprovers,omitempty"`
|
AutoApprovers AutoApproverPolicy `json:"autoApprovers"`
|
||||||
SSHs []SSH `json:"ssh,omitempty"`
|
SSHs []SSH `json:"ssh"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// MarshalJSON is deliberately not implemented for Policy.
|
|
||||||
// We use the default JSON marshalling behavior provided by the Go runtime.
|
|
||||||
|
|
||||||
var (
|
var (
|
||||||
// TODO(kradalby): Add these checks for tagOwners and autoApprovers
|
autogroupForSrc = []AutoGroup{}
|
||||||
autogroupForSrc = []AutoGroup{AutoGroupMember, AutoGroupTagged}
|
autogroupForDst = []AutoGroup{AutoGroupInternet}
|
||||||
autogroupForDst = []AutoGroup{AutoGroupInternet, AutoGroupMember, AutoGroupTagged}
|
autogroupForSSHSrc = []AutoGroup{}
|
||||||
autogroupForSSHSrc = []AutoGroup{AutoGroupMember, AutoGroupTagged}
|
autogroupForSSHDst = []AutoGroup{}
|
||||||
autogroupForSSHDst = []AutoGroup{AutoGroupMember, AutoGroupTagged}
|
|
||||||
autogroupForSSHUser = []AutoGroup{AutoGroupNonRoot}
|
autogroupForSSHUser = []AutoGroup{AutoGroupNonRoot}
|
||||||
autogroupNotSupported = []AutoGroup{AutoGroupSelf}
|
autogroupNotSupported = []AutoGroup{AutoGroupSelf, AutoGroupMember, AutoGroupTagged}
|
||||||
)
|
)
|
||||||
|
|
||||||
func validateAutogroupSupported(ag *AutoGroup) error {
|
func validateAutogroupSupported(ag *AutoGroup) error {
|
||||||
|
@ -1548,24 +1251,6 @@ type SSH struct {
|
||||||
// It can be a list of usernames, groups, tags or autogroups.
|
// It can be a list of usernames, groups, tags or autogroups.
|
||||||
type SSHSrcAliases []Alias
|
type SSHSrcAliases []Alias
|
||||||
|
|
||||||
// MarshalJSON marshals the Groups to JSON.
|
|
||||||
func (g Groups) MarshalJSON() ([]byte, error) {
|
|
||||||
if g == nil {
|
|
||||||
return []byte("{}"), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
raw := make(map[string][]string)
|
|
||||||
for group, usernames := range g {
|
|
||||||
users := make([]string, len(usernames))
|
|
||||||
for i, username := range usernames {
|
|
||||||
users[i] = string(username)
|
|
||||||
}
|
|
||||||
raw[string(group)] = users
|
|
||||||
}
|
|
||||||
|
|
||||||
return json.Marshal(raw)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *SSHSrcAliases) UnmarshalJSON(b []byte) error {
|
func (a *SSHSrcAliases) UnmarshalJSON(b []byte) error {
|
||||||
var aliases []AliasEnc
|
var aliases []AliasEnc
|
||||||
err := json.Unmarshal(b, &aliases)
|
err := json.Unmarshal(b, &aliases)
|
||||||
|
@ -1579,98 +1264,12 @@ func (a *SSHSrcAliases) UnmarshalJSON(b []byte) error {
|
||||||
case *Username, *Group, *Tag, *AutoGroup:
|
case *Username, *Group, *Tag, *AutoGroup:
|
||||||
(*a)[i] = alias.Alias
|
(*a)[i] = alias.Alias
|
||||||
default:
|
default:
|
||||||
return fmt.Errorf(
|
return fmt.Errorf("type %T not supported", alias.Alias)
|
||||||
"alias %T is not supported for SSH source",
|
|
||||||
alias.Alias,
|
|
||||||
)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
-func (a *SSHDstAliases) UnmarshalJSON(b []byte) error {
-	var aliases []AliasEnc
-	err := json.Unmarshal(b, &aliases)
-	if err != nil {
-		return err
-	}
-
-	*a = make([]Alias, len(aliases))
-	for i, alias := range aliases {
-		switch alias.Alias.(type) {
-		case *Username, *Tag, *AutoGroup, *Host,
-			// Asterix and Group is actually not supposed to be supported,
-			// however we do not support autogroups at the moment
-			// so we will leave it in as there is no other option
-			// to dynamically give all access
-			// https://tailscale.com/kb/1193/tailscale-ssh#dst
-			// TODO(kradalby): remove this when we support autogroup:tagged and autogroup:member
-			Asterix:
-			(*a)[i] = alias.Alias
-		default:
-			return fmt.Errorf(
-				"alias %T is not supported for SSH destination",
-				alias.Alias,
-			)
-		}
-	}
-	return nil
-}
-
-// MarshalJSON marshals the SSHDstAliases to JSON.
-func (a SSHDstAliases) MarshalJSON() ([]byte, error) {
-	if a == nil {
-		return []byte("[]"), nil
-	}
-
-	aliases := make([]string, len(a))
-	for i, alias := range a {
-		switch v := alias.(type) {
-		case *Username:
-			aliases[i] = string(*v)
-		case *Tag:
-			aliases[i] = string(*v)
-		case *AutoGroup:
-			aliases[i] = string(*v)
-		case *Host:
-			aliases[i] = string(*v)
-		case Asterix:
-			aliases[i] = "*"
-		default:
-			return nil, fmt.Errorf("unknown SSH destination alias type: %T", v)
-		}
-	}
-
-	return json.Marshal(aliases)
-}
-
-// MarshalJSON marshals the SSHSrcAliases to JSON.
-func (a SSHSrcAliases) MarshalJSON() ([]byte, error) {
-	if a == nil {
-		return []byte("[]"), nil
-	}
-
-	aliases := make([]string, len(a))
-	for i, alias := range a {
-		switch v := alias.(type) {
-		case *Username:
-			aliases[i] = string(*v)
-		case *Group:
-			aliases[i] = string(*v)
-		case *Tag:
-			aliases[i] = string(*v)
-		case *AutoGroup:
-			aliases[i] = string(*v)
-		case Asterix:
-			aliases[i] = "*"
-		default:
-			return nil, fmt.Errorf("unknown SSH source alias type: %T", v)
-		}
-	}
-
-	return json.Marshal(aliases)
-}
-
 func (a SSHSrcAliases) Resolve(p *Policy, users types.Users, nodes types.Nodes) (*netipx.IPSet, error) {
 	var ips netipx.IPSetBuilder
 	var errs []error

@@ -1691,17 +1290,38 @@ func (a SSHSrcAliases) Resolve(p *Policy, users types.Users, nodes types.Nodes)
 // It can be a list of usernames, tags or autogroups.
 type SSHDstAliases []Alias

+func (a *SSHDstAliases) UnmarshalJSON(b []byte) error {
+	var aliases []AliasEnc
+	err := json.Unmarshal(b, &aliases)
+	if err != nil {
+		return err
+	}
+
+	*a = make([]Alias, len(aliases))
+	for i, alias := range aliases {
+		switch alias.Alias.(type) {
+		case *Username, *Tag, *AutoGroup,
+			// Asterix and Group is actually not supposed to be supported,
+			// however we do not support autogroups at the moment
+			// so we will leave it in as there is no other option
+			// to dynamically give all access
+			// https://tailscale.com/kb/1193/tailscale-ssh#dst
+			// TODO(kradalby): remove this when we support autogroup:tagged and autogroup:member
+			Asterix:
+			(*a)[i] = alias.Alias
+		default:
+			return fmt.Errorf("type %T not supported", alias.Alias)
+		}
+	}
+	return nil
+}
+
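A rough standalone model of the decode-then-type-switch validation that this UnmarshalJSON performs; AliasEnc and the real Alias implementations are not shown in this diff, so the types and parseAlias helper below are illustrative assumptions only:

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// Simplified stand-ins: decode the raw JSON list, map each entry to a
// concrete alias type, then use a type switch to reject kinds that are
// not valid as SSH destinations.
type (
	Alias    interface{ alias() }
	Username string
	Tag      string
	Group    string
)

func (Username) alias() {}
func (Tag) alias()      {}
func (Group) alias()    {}

func parseAlias(s string) Alias {
	switch {
	case strings.HasPrefix(s, "tag:"):
		return Tag(s)
	case strings.HasPrefix(s, "group:"):
		return Group(s)
	default:
		return Username(s)
	}
}

func parseSSHDst(b []byte) ([]Alias, error) {
	var raw []string
	if err := json.Unmarshal(b, &raw); err != nil {
		return nil, err
	}
	out := make([]Alias, len(raw))
	for i, s := range raw {
		a := parseAlias(s)
		switch a.(type) {
		case Username, Tag: // groups are not valid SSH destinations
			out[i] = a
		default:
			return nil, fmt.Errorf("type %T not supported", a)
		}
	}
	return out, nil
}

func main() {
	dst, err := parseSSHDst([]byte(`["tag:servers", "admin@example.com"]`))
	fmt.Println(dst, err)

	_, err = parseSSHDst([]byte(`["group:admins"]`))
	fmt.Println(err) // type main.Group not supported
}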
 type SSHUser string

 func (u SSHUser) String() string {
 	return string(u)
 }

-// MarshalJSON marshals the SSHUser to JSON.
-func (u SSHUser) MarshalJSON() ([]byte, error) {
-	return json.Marshal(string(u))
-}
-
 // unmarshalPolicy takes a byte slice and unmarshals it into a Policy struct.
 // In addition to unmarshalling, it will also validate the policy.
 // This is the only entrypoint of reading a policy from a file or other source.

@@ -10,9 +10,6 @@ import (
 	"github.com/google/go-cmp/cmp/cmpopts"
 	"github.com/juanfont/headscale/hscontrol/types"
 	"github.com/juanfont/headscale/hscontrol/util"
-	"github.com/prometheus/common/model"
-	"time"
-	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"go4.org/netipx"
 	xmaps "golang.org/x/exp/maps"

@@ -22,83 +19,6 @@ import (
 	"tailscale.com/types/ptr"
 )

-// TestUnmarshalPolicy tests the unmarshalling of JSON into Policy objects and the marshalling
-// back to JSON (round-trip). It ensures that:
-// 1. JSON can be correctly unmarshalled into a Policy object
-// 2. A Policy object can be correctly marshalled back to JSON
-// 3. The unmarshalled Policy matches the expected Policy
-// 4. The marshalled and then unmarshalled Policy is semantically equivalent to the original
-//    (accounting for nil vs empty map/slice differences)
-//
-// This test also verifies that all the required struct fields are properly marshalled and
-// unmarshalled, maintaining semantic equivalence through a complete JSON round-trip.
-
-// TestMarshalJSON tests explicit marshalling of Policy objects to JSON.
-// This test ensures our custom MarshalJSON methods properly encode
-// the various data structures used in the Policy.
-func TestMarshalJSON(t *testing.T) {
-	// Create a complex test policy
-	policy := &Policy{
-		Groups: Groups{
-			Group("group:example"): []Username{Username("user@example.com")},
-		},
-		Hosts: Hosts{
-			"host-1": Prefix(mp("100.100.100.100/32")),
-		},
-		TagOwners: TagOwners{
-			Tag("tag:test"): Owners{up("user@example.com")},
-		},
-		ACLs: []ACL{
-			{
-				Action:   "accept",
-				Protocol: "tcp",
-				Sources: Aliases{
-					ptr.To(Username("user@example.com")),
-				},
-				Destinations: []AliasWithPorts{
-					{
-						Alias: ptr.To(Username("other@example.com")),
-						Ports: []tailcfg.PortRange{{First: 80, Last: 80}},
-					},
-				},
-			},
-		},
-	}
-
-	// Marshal the policy to JSON
-	marshalled, err := json.MarshalIndent(policy, "", "  ")
-	require.NoError(t, err)
-
-	// Make sure all expected fields are present in the JSON
-	jsonString := string(marshalled)
-	assert.Contains(t, jsonString, "group:example")
-	assert.Contains(t, jsonString, "user@example.com")
-	assert.Contains(t, jsonString, "host-1")
-	assert.Contains(t, jsonString, "100.100.100.100/32")
-	assert.Contains(t, jsonString, "tag:test")
-	assert.Contains(t, jsonString, "accept")
-	assert.Contains(t, jsonString, "tcp")
-	assert.Contains(t, jsonString, "80")
-
-	// Unmarshal back to verify round trip
-	var roundTripped Policy
-	err = json.Unmarshal(marshalled, &roundTripped)
-	require.NoError(t, err)
-
-	// Compare the original and round-tripped policies
-	cmps := append(util.Comparers,
-		cmp.Comparer(func(x, y Prefix) bool {
-			return x == y
-		}),
-		cmpopts.IgnoreUnexported(Policy{}),
-		cmpopts.EquateEmpty(),
-	)
-
-	if diff := cmp.Diff(policy, &roundTripped, cmps...); diff != "" {
-		t.Fatalf("round trip policy (-original +roundtripped):\n%s", diff)
-	}
-}
-
 func TestUnmarshalPolicy(t *testing.T) {
 	tests := []struct {
 		name    string

@@ -439,7 +359,7 @@ func TestUnmarshalPolicy(t *testing.T) {
 			],
 		}
 		`,
-		wantErr: `AutoGroup is invalid, got: "autogroup:invalid", must be one of [autogroup:internet autogroup:member autogroup:nonroot autogroup:tagged]`,
+		wantErr: `AutoGroup is invalid, got: "autogroup:invalid", must be one of [autogroup:internet]`,
 	},
 	{
 		name: "undefined-hostname-errors-2490",

@@ -591,138 +511,6 @@ func TestUnmarshalPolicy(t *testing.T) {
 		`,
 		wantErr: `"autogroup:internet" used in SSH destination, it can only be used in ACL destinations`,
 	},
-	{
-		name: "ssh-basic",
-		input: `
-{
-	"groups": {
-		"group:admins": ["admin@example.com"]
-	},
-	"tagOwners": {
-		"tag:servers": ["group:admins"]
-	},
-	"ssh": [
-		{
-			"action": "accept",
-			"src": [
-				"group:admins"
-			],
-			"dst": [
-				"tag:servers"
-			],
-			"users": ["root", "admin"]
-		}
-	]
-}
-`,
-		want: &Policy{
-			Groups: Groups{
-				Group("group:admins"): []Username{Username("admin@example.com")},
-			},
-			TagOwners: TagOwners{
-				Tag("tag:servers"): Owners{gp("group:admins")},
-			},
-			SSHs: []SSH{
-				{
-					Action: "accept",
-					Sources: SSHSrcAliases{
-						gp("group:admins"),
-					},
-					Destinations: SSHDstAliases{
-						tp("tag:servers"),
-					},
-					Users: []SSHUser{
-						SSHUser("root"),
-						SSHUser("admin"),
-					},
-				},
-			},
-		},
-	},
-	{
-		name: "ssh-with-tag-and-user",
-		input: `
-{
-	"tagOwners": {
-		"tag:web": ["admin@example.com"]
-	},
-	"ssh": [
-		{
-			"action": "accept",
-			"src": [
-				"tag:web"
-			],
-			"dst": [
-				"admin@example.com"
-			],
-			"users": ["*"]
-		}
-	]
-}
-`,
-		want: &Policy{
-			TagOwners: TagOwners{
-				Tag("tag:web"): Owners{ptr.To(Username("admin@example.com"))},
-			},
-			SSHs: []SSH{
-				{
-					Action: "accept",
-					Sources: SSHSrcAliases{
-						tp("tag:web"),
-					},
-					Destinations: SSHDstAliases{
-						ptr.To(Username("admin@example.com")),
-					},
-					Users: []SSHUser{
-						SSHUser("*"),
-					},
-				},
-			},
-		},
-	},
-	{
-		name: "ssh-with-check-period",
-		input: `
-{
-	"groups": {
-		"group:admins": ["admin@example.com"]
-	},
-	"ssh": [
-		{
-			"action": "accept",
-			"src": [
-				"group:admins"
-			],
-			"dst": [
-				"admin@example.com"
-			],
-			"users": ["root"],
-			"checkPeriod": "24h"
-		}
-	]
-}
-`,
-		want: &Policy{
-			Groups: Groups{
-				Group("group:admins"): []Username{Username("admin@example.com")},
-			},
-			SSHs: []SSH{
-				{
-					Action: "accept",
-					Sources: SSHSrcAliases{
-						gp("group:admins"),
-					},
-					Destinations: SSHDstAliases{
-						ptr.To(Username("admin@example.com")),
-					},
-					Users: []SSHUser{
-						SSHUser("root"),
-					},
-					CheckPeriod: model.Duration(24 * time.Hour),
-				},
-			},
-		},
-	},
 	{
 		name: "group-must-be-defined-acl-src",
 		input: `

@@ -958,61 +746,29 @@ func TestUnmarshalPolicy(t *testing.T) {
 		},
 	}

-	cmps := append(util.Comparers,
-		cmp.Comparer(func(x, y Prefix) bool {
-			return x == y
-		}),
-		cmpopts.IgnoreUnexported(Policy{}),
-	)
-
-	// For round-trip testing, we'll normalize the policies before comparing
-
+	cmps := append(util.Comparers, cmp.Comparer(func(x, y Prefix) bool {
+		return x == y
+	}))
+	cmps = append(cmps, cmpopts.IgnoreUnexported(Policy{}))

 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			// Test unmarshalling
 			policy, err := unmarshalPolicy([]byte(tt.input))
 			if tt.wantErr == "" {
 				if err != nil {
-					t.Fatalf("unmarshalling: got %v; want no error", err)
+					t.Fatalf("got %v; want no error", err)
 				}
 			} else {
 				if err == nil {
-					t.Fatalf("unmarshalling: got nil; want error %q", tt.wantErr)
+					t.Fatalf("got nil; want error %q", tt.wantErr)
 				} else if !strings.Contains(err.Error(), tt.wantErr) {
-					t.Fatalf("unmarshalling: got err %v; want error %q", err, tt.wantErr)
+					t.Fatalf("got err %v; want error %q", err, tt.wantErr)
 				}
-				return // Skip the rest of the test if we expected an error
 			}

 			if diff := cmp.Diff(tt.want, policy, cmps...); diff != "" {
 				t.Fatalf("unexpected policy (-want +got):\n%s", diff)
 			}
-
-			// Test round-trip marshalling/unmarshalling
-			if policy != nil {
-				// Marshal the policy back to JSON
-				marshalled, err := json.MarshalIndent(policy, "", "  ")
-				if err != nil {
-					t.Fatalf("marshalling: %v", err)
-				}
-
-				// Unmarshal it again
-				roundTripped, err := unmarshalPolicy(marshalled)
-				if err != nil {
-					t.Fatalf("round-trip unmarshalling: %v", err)
-				}
-
-				// Add EquateEmpty to handle nil vs empty maps/slices
-				roundTripCmps := append(cmps,
-					cmpopts.EquateEmpty(),
-					cmpopts.IgnoreUnexported(Policy{}),
-				)
-
-				// Compare using the enhanced comparers for round-trip testing
-				if diff := cmp.Diff(policy, roundTripped, roundTripCmps...); diff != "" {
-					t.Fatalf("round trip policy (-original +roundtripped):\n%s", diff)
-				}
-			}
 		})
 	}
 }
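The removed round-trip comparison above relies on cmpopts.EquateEmpty to paper over nil-versus-empty differences introduced by a JSON round trip; a minimal illustration of that comparer option (not the test itself):

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

type policy struct {
	Hosts  map[string]string
	Groups []string
}

func main() {
	original := policy{} // nil map and slice
	roundTripped := policy{Hosts: map[string]string{}, Groups: []string{}}

	// By default, nil and empty collections compare as different.
	fmt.Println(cmp.Diff(original, roundTripped) == "") // false
	// With EquateEmpty they are treated as equal.
	fmt.Println(cmp.Diff(original, roundTripped, cmpopts.EquateEmpty()) == "") // true
}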

@@ -1242,135 +998,6 @@ func TestResolvePolicy(t *testing.T) {
 		toResolve: Wildcard,
 		want:      []netip.Prefix{tsaddr.AllIPv4(), tsaddr.AllIPv6()},
 	},
-	{
-		name:      "autogroup-member-comprehensive",
-		toResolve: ptr.To(AutoGroup(AutoGroupMember)),
-		nodes: types.Nodes{
-			// Node with no tags (should be included)
-			{
-				User: users["testuser"],
-				IPv4: ap("100.100.101.1"),
-			},
-			// Node with forced tags (should be excluded)
-			{
-				User:       users["testuser"],
-				ForcedTags: []string{"tag:test"},
-				IPv4:       ap("100.100.101.2"),
-			},
-			// Node with allowed requested tag (should be excluded)
-			{
-				User: users["testuser"],
-				Hostinfo: &tailcfg.Hostinfo{
-					RequestTags: []string{"tag:test"},
-				},
-				IPv4: ap("100.100.101.3"),
-			},
-			// Node with non-allowed requested tag (should be included)
-			{
-				User: users["testuser"],
-				Hostinfo: &tailcfg.Hostinfo{
-					RequestTags: []string{"tag:notallowed"},
-				},
-				IPv4: ap("100.100.101.4"),
-			},
-			// Node with multiple requested tags, one allowed (should be excluded)
-			{
-				User: users["testuser"],
-				Hostinfo: &tailcfg.Hostinfo{
-					RequestTags: []string{"tag:test", "tag:notallowed"},
-				},
-				IPv4: ap("100.100.101.5"),
-			},
-			// Node with multiple requested tags, none allowed (should be included)
-			{
-				User: users["testuser"],
-				Hostinfo: &tailcfg.Hostinfo{
-					RequestTags: []string{"tag:notallowed1", "tag:notallowed2"},
-				},
-				IPv4: ap("100.100.101.6"),
-			},
-		},
-		pol: &Policy{
-			TagOwners: TagOwners{
-				Tag("tag:test"): Owners{ptr.To(Username("testuser@"))},
-			},
-		},
-		want: []netip.Prefix{
-			mp("100.100.101.1/32"), // No tags
-			mp("100.100.101.4/32"), // Non-allowed requested tag
-			mp("100.100.101.6/32"), // Multiple non-allowed requested tags
-		},
-	},
-	{
-		name:      "autogroup-tagged",
-		toResolve: ptr.To(AutoGroup(AutoGroupTagged)),
-		nodes: types.Nodes{
-			// Node with no tags (should be excluded)
-			{
-				User: users["testuser"],
-				IPv4: ap("100.100.101.1"),
-			},
-			// Node with forced tag (should be included)
-			{
-				User:       users["testuser"],
-				ForcedTags: []string{"tag:test"},
-				IPv4:       ap("100.100.101.2"),
-			},
-			// Node with allowed requested tag (should be included)
-			{
-				User: users["testuser"],
-				Hostinfo: &tailcfg.Hostinfo{
-					RequestTags: []string{"tag:test"},
-				},
-				IPv4: ap("100.100.101.3"),
-			},
-			// Node with non-allowed requested tag (should be excluded)
-			{
-				User: users["testuser"],
-				Hostinfo: &tailcfg.Hostinfo{
-					RequestTags: []string{"tag:notallowed"},
-				},
-				IPv4: ap("100.100.101.4"),
-			},
-			// Node with multiple requested tags, one allowed (should be included)
-			{
-				User: users["testuser"],
-				Hostinfo: &tailcfg.Hostinfo{
-					RequestTags: []string{"tag:test", "tag:notallowed"},
-				},
-				IPv4: ap("100.100.101.5"),
-			},
-			// Node with multiple requested tags, none allowed (should be excluded)
-			{
-				User: users["testuser"],
-				Hostinfo: &tailcfg.Hostinfo{
-					RequestTags: []string{"tag:notallowed1", "tag:notallowed2"},
-				},
-				IPv4: ap("100.100.101.6"),
-			},
-			// Node with multiple forced tags (should be included)
-			{
-				User:       users["testuser"],
-				ForcedTags: []string{"tag:test", "tag:other"},
-				IPv4:       ap("100.100.101.7"),
-			},
-		},
-		pol: &Policy{
-			TagOwners: TagOwners{
-				Tag("tag:test"): Owners{ptr.To(Username("testuser@"))},
-			},
-		},
-		want: []netip.Prefix{
-			mp("100.100.101.2/31"), // Forced tag and allowed requested tag consecutive IPs are put in 31 prefix
-			mp("100.100.101.5/32"), // Multiple requested tags, one allowed
-			mp("100.100.101.7/32"), // Multiple forced tags
-		},
-	},
-	{
-		name:      "autogroup-invalid",
-		toResolve: ptr.To(AutoGroup("autogroup:invalid")),
-		wantErr:   "unknown autogroup",
-	},
 	}

 	for _, tt := range tests {
@@ -1534,7 +1161,7 @@ func TestResolveAutoApprovers(t *testing.T) {
 		name: "mixed-routes-and-exit-nodes",
 		policy: &Policy{
 			Groups: Groups{
-				"group:testgroup": Usernames{"user1@", "user2@"},
+				"group:testgroup": Usernames{"user1", "user2"},
 			},
 			AutoApprovers: AutoApproverPolicy{
 				Routes: map[netip.Prefix]AutoApprovers{

@@ -92,7 +92,7 @@ func runTailSQLService(ctx context.Context, logf logger.Logf, stateDir, dbPath s
 	mux := tsql.NewMux()
 	tsweb.Debugger(mux)
 	go http.Serve(lst, mux)
-	logf("TailSQL started")
+	logf("ailSQL started")
 	<-ctx.Done()
 	logf("TailSQL shutting down...")
 	return tsNode.Close()

@@ -28,7 +28,7 @@ func RegisterWeb(registrationID types.RegistrationID) *elem.Element {
 		elem.H2(nil, elem.Text("Machine registration")),
 		elem.P(nil, elem.Text("Run the command below in the headscale server to add this machine to your network: ")),
 		elem.Code(attrs.Props{attrs.Style: codeStyleRegisterWebAPI.ToInline()},
-			elem.Text(fmt.Sprintf("headscale nodes register --key %s --user USERNAME", registrationID.String())),
+			elem.Text(fmt.Sprintf("headscale nodes register --user USERNAME --key %s", registrationID.String())),
 		),
 	),
 )

@@ -7,53 +7,50 @@ import (
 	"testing"

 	"github.com/google/go-cmp/cmp"
-	"github.com/google/go-cmp/cmp/cmpopts"
-	policyv2 "github.com/juanfont/headscale/hscontrol/policy/v2"
+	policyv1 "github.com/juanfont/headscale/hscontrol/policy/v1"
 	"github.com/juanfont/headscale/hscontrol/types"
 	"github.com/juanfont/headscale/integration/hsic"
 	"github.com/juanfont/headscale/integration/tsic"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-	"tailscale.com/tailcfg"
-	"tailscale.com/types/ptr"
 )

-var veryLargeDestination = []policyv2.AliasWithPorts{
-	aliasWithPorts(prefixp("0.0.0.0/5"), tailcfg.PortRangeAny),
-	aliasWithPorts(prefixp("8.0.0.0/7"), tailcfg.PortRangeAny),
-	aliasWithPorts(prefixp("11.0.0.0/8"), tailcfg.PortRangeAny),
-	aliasWithPorts(prefixp("12.0.0.0/6"), tailcfg.PortRangeAny),
-	aliasWithPorts(prefixp("16.0.0.0/4"), tailcfg.PortRangeAny),
-	aliasWithPorts(prefixp("32.0.0.0/3"), tailcfg.PortRangeAny),
-	aliasWithPorts(prefixp("64.0.0.0/2"), tailcfg.PortRangeAny),
-	aliasWithPorts(prefixp("128.0.0.0/3"), tailcfg.PortRangeAny),
-	aliasWithPorts(prefixp("160.0.0.0/5"), tailcfg.PortRangeAny),
-	aliasWithPorts(prefixp("168.0.0.0/6"), tailcfg.PortRangeAny),
-	aliasWithPorts(prefixp("172.0.0.0/12"), tailcfg.PortRangeAny),
-	aliasWithPorts(prefixp("172.32.0.0/11"), tailcfg.PortRangeAny),
-	aliasWithPorts(prefixp("172.64.0.0/10"), tailcfg.PortRangeAny),
-	aliasWithPorts(prefixp("172.128.0.0/9"), tailcfg.PortRangeAny),
-	aliasWithPorts(prefixp("173.0.0.0/8"), tailcfg.PortRangeAny),
-	aliasWithPorts(prefixp("174.0.0.0/7"), tailcfg.PortRangeAny),
-	aliasWithPorts(prefixp("176.0.0.0/4"), tailcfg.PortRangeAny),
-	aliasWithPorts(prefixp("192.0.0.0/9"), tailcfg.PortRangeAny),
-	aliasWithPorts(prefixp("192.128.0.0/11"), tailcfg.PortRangeAny),
-	aliasWithPorts(prefixp("192.160.0.0/13"), tailcfg.PortRangeAny),
-	aliasWithPorts(prefixp("192.169.0.0/16"), tailcfg.PortRangeAny),
-	aliasWithPorts(prefixp("192.170.0.0/15"), tailcfg.PortRangeAny),
-	aliasWithPorts(prefixp("192.172.0.0/14"), tailcfg.PortRangeAny),
-	aliasWithPorts(prefixp("192.176.0.0/12"), tailcfg.PortRangeAny),
-	aliasWithPorts(prefixp("192.192.0.0/10"), tailcfg.PortRangeAny),
-	aliasWithPorts(prefixp("193.0.0.0/8"), tailcfg.PortRangeAny),
-	aliasWithPorts(prefixp("194.0.0.0/7"), tailcfg.PortRangeAny),
-	aliasWithPorts(prefixp("196.0.0.0/6"), tailcfg.PortRangeAny),
-	aliasWithPorts(prefixp("200.0.0.0/5"), tailcfg.PortRangeAny),
-	aliasWithPorts(prefixp("208.0.0.0/4"), tailcfg.PortRangeAny),
+var veryLargeDestination = []string{
+	"0.0.0.0/5:*",
+	"8.0.0.0/7:*",
+	"11.0.0.0/8:*",
+	"12.0.0.0/6:*",
+	"16.0.0.0/4:*",
+	"32.0.0.0/3:*",
+	"64.0.0.0/2:*",
+	"128.0.0.0/3:*",
+	"160.0.0.0/5:*",
+	"168.0.0.0/6:*",
+	"172.0.0.0/12:*",
+	"172.32.0.0/11:*",
+	"172.64.0.0/10:*",
+	"172.128.0.0/9:*",
+	"173.0.0.0/8:*",
+	"174.0.0.0/7:*",
+	"176.0.0.0/4:*",
+	"192.0.0.0/9:*",
+	"192.128.0.0/11:*",
+	"192.160.0.0/13:*",
+	"192.169.0.0/16:*",
+	"192.170.0.0/15:*",
+	"192.172.0.0/14:*",
+	"192.176.0.0/12:*",
+	"192.192.0.0/10:*",
+	"193.0.0.0/8:*",
+	"194.0.0.0/7:*",
+	"196.0.0.0/6:*",
+	"200.0.0.0/5:*",
+	"208.0.0.0/4:*",
 }

 func aclScenario(
 	t *testing.T,
-	policy *policyv2.Policy,
+	policy *policyv1.ACLPolicy,
 	clientsPerUser int,
 ) *Scenario {
 	t.Helper()

@ -111,21 +108,19 @@ func TestACLHostsInNetMapTable(t *testing.T) {
|
||||||
// they can access minus one (them self).
|
// they can access minus one (them self).
|
||||||
tests := map[string]struct {
|
tests := map[string]struct {
|
||||||
users ScenarioSpec
|
users ScenarioSpec
|
||||||
policy policyv2.Policy
|
policy policyv1.ACLPolicy
|
||||||
want map[string]int
|
want map[string]int
|
||||||
}{
|
}{
|
||||||
// Test that when we have no ACL, each client netmap has
|
// Test that when we have no ACL, each client netmap has
|
||||||
// the amount of peers of the total amount of clients
|
// the amount of peers of the total amount of clients
|
||||||
"base-acls": {
|
"base-acls": {
|
||||||
users: spec,
|
users: spec,
|
||||||
policy: policyv2.Policy{
|
policy: policyv1.ACLPolicy{
|
||||||
ACLs: []policyv2.ACL{
|
ACLs: []policyv1.ACL{
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Sources: []policyv2.Alias{wildcard()},
|
Sources: []string{"*"},
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
Destinations: []string{"*:*"},
|
||||||
aliasWithPorts(wildcard(), tailcfg.PortRangeAny),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, want: map[string]int{
|
}, want: map[string]int{
|
||||||
|
@ -138,21 +133,17 @@ func TestACLHostsInNetMapTable(t *testing.T) {
|
||||||
// their own user.
|
// their own user.
|
||||||
"two-isolated-users": {
|
"two-isolated-users": {
|
||||||
users: spec,
|
users: spec,
|
||||||
policy: policyv2.Policy{
|
policy: policyv1.ACLPolicy{
|
||||||
ACLs: []policyv2.ACL{
|
ACLs: []policyv1.ACL{
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Sources: []policyv2.Alias{usernamep("user1@")},
|
Sources: []string{"user1@"},
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
Destinations: []string{"user1@:*"},
|
||||||
aliasWithPorts(usernamep("user1@"), tailcfg.PortRangeAny),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Sources: []policyv2.Alias{usernamep("user2@")},
|
Sources: []string{"user2@"},
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
Destinations: []string{"user2@:*"},
|
||||||
aliasWithPorts(usernamep("user2@"), tailcfg.PortRangeAny),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, want: map[string]int{
|
}, want: map[string]int{
|
||||||
|
@ -165,35 +156,27 @@ func TestACLHostsInNetMapTable(t *testing.T) {
|
||||||
// in the netmap.
|
// in the netmap.
|
||||||
"two-restricted-present-in-netmap": {
|
"two-restricted-present-in-netmap": {
|
||||||
users: spec,
|
users: spec,
|
||||||
policy: policyv2.Policy{
|
policy: policyv1.ACLPolicy{
|
||||||
ACLs: []policyv2.ACL{
|
ACLs: []policyv1.ACL{
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Sources: []policyv2.Alias{usernamep("user1@")},
|
Sources: []string{"user1@"},
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
Destinations: []string{"user1@:22"},
|
||||||
aliasWithPorts(usernamep("user1@"), tailcfg.PortRange{First: 22, Last: 22}),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Sources: []policyv2.Alias{usernamep("user2@")},
|
Sources: []string{"user2@"},
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
Destinations: []string{"user2@:22"},
|
||||||
aliasWithPorts(usernamep("user2@"), tailcfg.PortRange{First: 22, Last: 22}),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Sources: []policyv2.Alias{usernamep("user1@")},
|
Sources: []string{"user1@"},
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
Destinations: []string{"user2@:22"},
|
||||||
aliasWithPorts(usernamep("user2@"), tailcfg.PortRange{First: 22, Last: 22}),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Sources: []policyv2.Alias{usernamep("user2@")},
|
Sources: []string{"user2@"},
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
Destinations: []string{"user1@:22"},
|
||||||
aliasWithPorts(usernamep("user1@"), tailcfg.PortRange{First: 22, Last: 22}),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, want: map[string]int{
|
}, want: map[string]int{
|
||||||
|
@ -207,28 +190,22 @@ func TestACLHostsInNetMapTable(t *testing.T) {
|
||||||
// need them present on the other side for the "return path".
|
// need them present on the other side for the "return path".
|
||||||
"two-ns-one-isolated": {
|
"two-ns-one-isolated": {
|
||||||
users: spec,
|
users: spec,
|
||||||
policy: policyv2.Policy{
|
policy: policyv1.ACLPolicy{
|
||||||
ACLs: []policyv2.ACL{
|
ACLs: []policyv1.ACL{
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Sources: []policyv2.Alias{usernamep("user1@")},
|
Sources: []string{"user1@"},
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
Destinations: []string{"user1@:*"},
|
||||||
aliasWithPorts(usernamep("user1@"), tailcfg.PortRangeAny),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Sources: []policyv2.Alias{usernamep("user2@")},
|
Sources: []string{"user2@"},
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
Destinations: []string{"user2@:*"},
|
||||||
aliasWithPorts(usernamep("user2@"), tailcfg.PortRangeAny),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Sources: []policyv2.Alias{usernamep("user1@")},
|
Sources: []string{"user1@"},
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
Destinations: []string{"user2@:*"},
|
||||||
aliasWithPorts(usernamep("user2@"), tailcfg.PortRangeAny),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, want: map[string]int{
|
}, want: map[string]int{
|
||||||
|
@ -238,37 +215,22 @@ func TestACLHostsInNetMapTable(t *testing.T) {
|
||||||
},
|
},
|
||||||
"very-large-destination-prefix-1372": {
|
"very-large-destination-prefix-1372": {
|
||||||
users: spec,
|
users: spec,
|
||||||
policy: policyv2.Policy{
|
policy: policyv1.ACLPolicy{
|
||||||
ACLs: []policyv2.ACL{
|
ACLs: []policyv1.ACL{
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Sources: []policyv2.Alias{usernamep("user1@")},
|
Sources: []string{"user1@"},
|
||||||
Destinations: append(
|
Destinations: append([]string{"user1@:*"}, veryLargeDestination...),
|
||||||
[]policyv2.AliasWithPorts{
|
|
||||||
aliasWithPorts(usernamep("user1@"), tailcfg.PortRangeAny),
|
|
||||||
},
|
|
||||||
veryLargeDestination...,
|
|
||||||
),
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Sources: []policyv2.Alias{usernamep("user2@")},
|
Sources: []string{"user2@"},
|
||||||
Destinations: append(
|
Destinations: append([]string{"user2@:*"}, veryLargeDestination...),
|
||||||
[]policyv2.AliasWithPorts{
|
|
||||||
aliasWithPorts(usernamep("user2@"), tailcfg.PortRangeAny),
|
|
||||||
},
|
|
||||||
veryLargeDestination...,
|
|
||||||
),
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Sources: []policyv2.Alias{usernamep("user1@")},
|
Sources: []string{"user1@"},
|
||||||
Destinations: append(
|
Destinations: append([]string{"user2@:*"}, veryLargeDestination...),
|
||||||
[]policyv2.AliasWithPorts{
|
|
||||||
aliasWithPorts(usernamep("user2@"), tailcfg.PortRangeAny),
|
|
||||||
},
|
|
||||||
veryLargeDestination...,
|
|
||||||
),
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, want: map[string]int{
|
}, want: map[string]int{
|
||||||
|
@ -278,15 +240,12 @@ func TestACLHostsInNetMapTable(t *testing.T) {
|
||||||
},
|
},
|
||||||
"ipv6-acls-1470": {
|
"ipv6-acls-1470": {
|
||||||
users: spec,
|
users: spec,
|
||||||
policy: policyv2.Policy{
|
policy: policyv1.ACLPolicy{
|
||||||
ACLs: []policyv2.ACL{
|
ACLs: []policyv1.ACL{
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Sources: []policyv2.Alias{wildcard()},
|
Sources: []string{"*"},
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
Destinations: []string{"0.0.0.0/0:*", "::/0:*"},
|
||||||
aliasWithPorts(prefixp("0.0.0.0/0"), tailcfg.PortRangeAny),
|
|
||||||
aliasWithPorts(prefixp("::/0"), tailcfg.PortRangeAny),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, want: map[string]int{
|
}, want: map[string]int{
|
||||||
|
@ -336,14 +295,12 @@ func TestACLAllowUser80Dst(t *testing.T) {
|
||||||
IntegrationSkip(t)
|
IntegrationSkip(t)
|
||||||
|
|
||||||
scenario := aclScenario(t,
|
scenario := aclScenario(t,
|
||||||
&policyv2.Policy{
|
&policyv1.ACLPolicy{
|
||||||
ACLs: []policyv2.ACL{
|
ACLs: []policyv1.ACL{
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Sources: []policyv2.Alias{usernamep("user1@")},
|
Sources: []string{"user1@"},
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
Destinations: []string{"user2@:80"},
|
||||||
aliasWithPorts(usernamep("user2@"), tailcfg.PortRange{First: 80, Last: 80}),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
@ -392,17 +349,15 @@ func TestACLDenyAllPort80(t *testing.T) {
|
||||||
IntegrationSkip(t)
|
IntegrationSkip(t)
|
||||||
|
|
||||||
scenario := aclScenario(t,
|
scenario := aclScenario(t,
|
||||||
&policyv2.Policy{
|
&policyv1.ACLPolicy{
|
||||||
Groups: policyv2.Groups{
|
Groups: map[string][]string{
|
||||||
policyv2.Group("group:integration-acl-test"): []policyv2.Username{policyv2.Username("user1@"), policyv2.Username("user2@")},
|
"group:integration-acl-test": {"user1@", "user2@"},
|
||||||
},
|
},
|
||||||
ACLs: []policyv2.ACL{
|
ACLs: []policyv1.ACL{
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Sources: []policyv2.Alias{groupp("group:integration-acl-test")},
|
Sources: []string{"group:integration-acl-test"},
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
Destinations: []string{"*:22"},
|
||||||
aliasWithPorts(wildcard(), tailcfg.PortRange{First: 22, Last: 22}),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
@ -441,14 +396,12 @@ func TestACLAllowUserDst(t *testing.T) {
|
||||||
IntegrationSkip(t)
|
IntegrationSkip(t)
|
||||||
|
|
||||||
scenario := aclScenario(t,
|
scenario := aclScenario(t,
|
||||||
&policyv2.Policy{
|
&policyv1.ACLPolicy{
|
||||||
ACLs: []policyv2.ACL{
|
ACLs: []policyv1.ACL{
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Sources: []policyv2.Alias{usernamep("user1@")},
|
Sources: []string{"user1@"},
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
Destinations: []string{"user2@:*"},
|
||||||
aliasWithPorts(usernamep("user2@"), tailcfg.PortRangeAny),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
@ -499,14 +452,12 @@ func TestACLAllowStarDst(t *testing.T) {
|
||||||
IntegrationSkip(t)
|
IntegrationSkip(t)
|
||||||
|
|
||||||
scenario := aclScenario(t,
|
scenario := aclScenario(t,
|
||||||
&policyv2.Policy{
|
&policyv1.ACLPolicy{
|
||||||
ACLs: []policyv2.ACL{
|
ACLs: []policyv1.ACL{
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Sources: []policyv2.Alias{usernamep("user1@")},
|
Sources: []string{"user1@"},
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
Destinations: []string{"*:*"},
|
||||||
aliasWithPorts(wildcard(), tailcfg.PortRangeAny),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
@ -558,18 +509,16 @@ func TestACLNamedHostsCanReachBySubnet(t *testing.T) {
|
||||||
IntegrationSkip(t)
|
IntegrationSkip(t)
|
||||||
|
|
||||||
scenario := aclScenario(t,
|
scenario := aclScenario(t,
|
||||||
&policyv2.Policy{
|
&policyv1.ACLPolicy{
|
||||||
Hosts: policyv2.Hosts{
|
Hosts: policyv1.Hosts{
|
||||||
"all": policyv2.Prefix(netip.MustParsePrefix("100.64.0.0/24")),
|
"all": netip.MustParsePrefix("100.64.0.0/24"),
|
||||||
},
|
},
|
||||||
ACLs: []policyv2.ACL{
|
ACLs: []policyv1.ACL{
|
||||||
// Everyone can curl test3
|
// Everyone can curl test3
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Sources: []policyv2.Alias{wildcard()},
|
Sources: []string{"*"},
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
Destinations: []string{"all:*"},
|
||||||
aliasWithPorts(hostp("all"), tailcfg.PortRangeAny),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
@ -657,58 +606,50 @@ func TestACLNamedHostsCanReach(t *testing.T) {
|
||||||
IntegrationSkip(t)
|
IntegrationSkip(t)
|
||||||
|
|
||||||
tests := map[string]struct {
|
tests := map[string]struct {
|
||||||
policy policyv2.Policy
|
policy policyv1.ACLPolicy
|
||||||
}{
|
}{
|
||||||
"ipv4": {
|
"ipv4": {
|
||||||
policy: policyv2.Policy{
|
policy: policyv1.ACLPolicy{
|
||||||
Hosts: policyv2.Hosts{
|
Hosts: policyv1.Hosts{
|
||||||
"test1": policyv2.Prefix(netip.MustParsePrefix("100.64.0.1/32")),
|
"test1": netip.MustParsePrefix("100.64.0.1/32"),
|
||||||
"test2": policyv2.Prefix(netip.MustParsePrefix("100.64.0.2/32")),
|
"test2": netip.MustParsePrefix("100.64.0.2/32"),
|
||||||
"test3": policyv2.Prefix(netip.MustParsePrefix("100.64.0.3/32")),
|
"test3": netip.MustParsePrefix("100.64.0.3/32"),
|
||||||
},
|
},
|
||||||
ACLs: []policyv2.ACL{
|
ACLs: []policyv1.ACL{
|
||||||
// Everyone can curl test3
|
// Everyone can curl test3
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Sources: []policyv2.Alias{wildcard()},
|
Sources: []string{"*"},
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
Destinations: []string{"test3:*"},
|
||||||
aliasWithPorts(hostp("test3"), tailcfg.PortRangeAny),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
// test1 can curl test2
|
// test1 can curl test2
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Sources: []policyv2.Alias{hostp("test1")},
|
Sources: []string{"test1"},
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
Destinations: []string{"test2:*"},
|
||||||
aliasWithPorts(hostp("test2"), tailcfg.PortRangeAny),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
"ipv6": {
|
"ipv6": {
|
||||||
policy: policyv2.Policy{
|
policy: policyv1.ACLPolicy{
|
||||||
Hosts: policyv2.Hosts{
|
Hosts: policyv1.Hosts{
|
||||||
"test1": policyv2.Prefix(netip.MustParsePrefix("fd7a:115c:a1e0::1/128")),
|
"test1": netip.MustParsePrefix("fd7a:115c:a1e0::1/128"),
|
||||||
"test2": policyv2.Prefix(netip.MustParsePrefix("fd7a:115c:a1e0::2/128")),
|
"test2": netip.MustParsePrefix("fd7a:115c:a1e0::2/128"),
|
||||||
"test3": policyv2.Prefix(netip.MustParsePrefix("fd7a:115c:a1e0::3/128")),
|
"test3": netip.MustParsePrefix("fd7a:115c:a1e0::3/128"),
|
||||||
},
|
},
|
||||||
ACLs: []policyv2.ACL{
|
ACLs: []policyv1.ACL{
|
||||||
// Everyone can curl test3
|
// Everyone can curl test3
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Sources: []policyv2.Alias{wildcard()},
|
Sources: []string{"*"},
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
Destinations: []string{"test3:*"},
|
||||||
aliasWithPorts(hostp("test3"), tailcfg.PortRangeAny),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
// test1 can curl test2
|
// test1 can curl test2
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Sources: []policyv2.Alias{hostp("test1")},
|
Sources: []string{"test1"},
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
Destinations: []string{"test2:*"},
|
||||||
aliasWithPorts(hostp("test2"), tailcfg.PortRangeAny),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
@ -914,81 +855,71 @@ func TestACLDevice1CanAccessDevice2(t *testing.T) {
|
||||||
IntegrationSkip(t)
|
IntegrationSkip(t)
|
||||||
|
|
||||||
tests := map[string]struct {
|
tests := map[string]struct {
|
||||||
policy policyv2.Policy
|
policy policyv1.ACLPolicy
|
||||||
}{
|
}{
|
||||||
"ipv4": {
|
"ipv4": {
|
||||||
policy: policyv2.Policy{
|
policy: policyv1.ACLPolicy{
|
||||||
ACLs: []policyv2.ACL{
|
ACLs: []policyv1.ACL{
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Sources: []policyv2.Alias{prefixp("100.64.0.1/32")},
|
Sources: []string{"100.64.0.1"},
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
Destinations: []string{"100.64.0.2:*"},
|
||||||
aliasWithPorts(prefixp("100.64.0.2/32"), tailcfg.PortRangeAny),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
"ipv6": {
|
"ipv6": {
|
||||||
policy: policyv2.Policy{
|
policy: policyv1.ACLPolicy{
|
||||||
ACLs: []policyv2.ACL{
|
ACLs: []policyv1.ACL{
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Sources: []policyv2.Alias{prefixp("fd7a:115c:a1e0::1/128")},
|
Sources: []string{"fd7a:115c:a1e0::1"},
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
Destinations: []string{"fd7a:115c:a1e0::2:*"},
|
||||||
aliasWithPorts(prefixp("fd7a:115c:a1e0::2/128"), tailcfg.PortRangeAny),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
"hostv4cidr": {
|
"hostv4cidr": {
|
||||||
policy: policyv2.Policy{
|
policy: policyv1.ACLPolicy{
|
||||||
Hosts: policyv2.Hosts{
|
Hosts: policyv1.Hosts{
|
||||||
"test1": policyv2.Prefix(netip.MustParsePrefix("100.64.0.1/32")),
|
"test1": netip.MustParsePrefix("100.64.0.1/32"),
|
||||||
"test2": policyv2.Prefix(netip.MustParsePrefix("100.64.0.2/32")),
|
"test2": netip.MustParsePrefix("100.64.0.2/32"),
|
||||||
},
|
},
|
||||||
ACLs: []policyv2.ACL{
|
ACLs: []policyv1.ACL{
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Sources: []policyv2.Alias{hostp("test1")},
|
Sources: []string{"test1"},
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
Destinations: []string{"test2:*"},
|
||||||
aliasWithPorts(hostp("test2"), tailcfg.PortRangeAny),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
"hostv6cidr": {
|
"hostv6cidr": {
|
||||||
policy: policyv2.Policy{
|
policy: policyv1.ACLPolicy{
|
||||||
Hosts: policyv2.Hosts{
|
Hosts: policyv1.Hosts{
|
||||||
"test1": policyv2.Prefix(netip.MustParsePrefix("fd7a:115c:a1e0::1/128")),
|
"test1": netip.MustParsePrefix("fd7a:115c:a1e0::1/128"),
|
||||||
"test2": policyv2.Prefix(netip.MustParsePrefix("fd7a:115c:a1e0::2/128")),
|
"test2": netip.MustParsePrefix("fd7a:115c:a1e0::2/128"),
|
||||||
},
|
},
|
||||||
ACLs: []policyv2.ACL{
|
ACLs: []policyv1.ACL{
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Sources: []policyv2.Alias{hostp("test1")},
|
Sources: []string{"test1"},
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
Destinations: []string{"test2:*"},
|
||||||
aliasWithPorts(hostp("test2"), tailcfg.PortRangeAny),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
"group": {
|
"group": {
|
||||||
policy: policyv2.Policy{
|
policy: policyv1.ACLPolicy{
|
||||||
Groups: policyv2.Groups{
|
Groups: map[string][]string{
|
||||||
policyv2.Group("group:one"): []policyv2.Username{policyv2.Username("user1@")},
|
"group:one": {"user1@"},
|
||||||
policyv2.Group("group:two"): []policyv2.Username{policyv2.Username("user2@")},
|
"group:two": {"user2@"},
|
||||||
},
|
},
|
||||||
ACLs: []policyv2.ACL{
|
ACLs: []policyv1.ACL{
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Sources: []policyv2.Alias{groupp("group:one")},
|
Sources: []string{"group:one"},
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
Destinations: []string{"group:two:*"},
|
||||||
aliasWithPorts(groupp("group:two"), tailcfg.PortRangeAny),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
@ -1142,17 +1073,15 @@ func TestPolicyUpdateWhileRunningWithCLIInDatabase(t *testing.T) {
|
||||||
headscale, err := scenario.Headscale()
|
headscale, err := scenario.Headscale()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
p := policyv2.Policy{
|
p := policyv1.ACLPolicy{
|
||||||
ACLs: []policyv2.ACL{
|
ACLs: []policyv1.ACL{
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Sources: []policyv2.Alias{usernamep("user1@")},
|
Sources: []string{"user1@"},
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
Destinations: []string{"user2@:*"},
|
||||||
aliasWithPorts(usernamep("user2@"), tailcfg.PortRangeAny),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Hosts: policyv2.Hosts{},
|
Hosts: policyv1.Hosts{},
|
||||||
}
|
}
|
||||||
|
|
||||||
err = headscale.SetPolicy(&p)
|
err = headscale.SetPolicy(&p)
|
||||||
|
@ -1160,7 +1089,7 @@ func TestPolicyUpdateWhileRunningWithCLIInDatabase(t *testing.T) {
|
||||||
|
|
||||||
// Get the current policy and check
|
// Get the current policy and check
|
||||||
// if it is the same as the one we set.
|
// if it is the same as the one we set.
|
||||||
var output *policyv2.Policy
|
var output *policyv1.ACLPolicy
|
||||||
err = executeAndUnmarshal(
|
err = executeAndUnmarshal(
|
||||||
headscale,
|
headscale,
|
||||||
[]string{
|
[]string{
|
||||||
|
@ -1176,7 +1105,7 @@ func TestPolicyUpdateWhileRunningWithCLIInDatabase(t *testing.T) {
|
||||||
|
|
||||||
assert.Len(t, output.ACLs, 1)
|
assert.Len(t, output.ACLs, 1)
|
||||||
|
|
||||||
if diff := cmp.Diff(p, *output, cmpopts.IgnoreUnexported(policyv2.Policy{}), cmpopts.EquateEmpty()); diff != "" {
|
if diff := cmp.Diff(p, *output); diff != "" {
|
||||||
t.Errorf("unexpected policy(-want +got):\n%s", diff)
|
t.Errorf("unexpected policy(-want +got):\n%s", diff)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1210,120 +1139,3 @@ func TestPolicyUpdateWhileRunningWithCLIInDatabase(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestACLAutogroupMember(t *testing.T) {
|
|
||||||
IntegrationSkip(t)
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
scenario := aclScenario(t,
|
|
||||||
&policyv2.Policy{
|
|
||||||
ACLs: []policyv2.ACL{
|
|
||||||
{
|
|
||||||
Action: "accept",
|
|
||||||
Sources: []policyv2.Alias{ptr.To(policyv2.AutoGroupMember)},
|
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
|
||||||
aliasWithPorts(ptr.To(policyv2.AutoGroupMember), tailcfg.PortRangeAny),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
2,
|
|
||||||
)
|
|
||||||
defer scenario.ShutdownAssertNoPanics(t)
|
|
||||||
|
|
||||||
allClients, err := scenario.ListTailscaleClients()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
err = scenario.WaitForTailscaleSync()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Test that untagged nodes can access each other
|
|
||||||
for _, client := range allClients {
|
|
||||||
status, err := client.Status()
|
|
||||||
require.NoError(t, err)
|
|
||||||
if status.Self.Tags != nil && status.Self.Tags.Len() > 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, peer := range allClients {
|
|
||||||
if client.Hostname() == peer.Hostname() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
status, err := peer.Status()
|
|
||||||
require.NoError(t, err)
|
|
||||||
if status.Self.Tags != nil && status.Self.Tags.Len() > 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
fqdn, err := peer.FQDN()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
|
|
||||||
t.Logf("url from %s to %s", client.Hostname(), url)
|
|
||||||
|
|
||||||
result, err := client.Curl(url)
|
|
||||||
assert.Len(t, result, 13)
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestACLAutogroupTagged(t *testing.T) {
|
|
||||||
IntegrationSkip(t)
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
scenario := aclScenario(t,
|
|
||||||
&policyv2.Policy{
|
|
||||||
ACLs: []policyv2.ACL{
|
|
||||||
{
|
|
||||||
Action: "accept",
|
|
||||||
Sources: []policyv2.Alias{ptr.To(policyv2.AutoGroupTagged)},
|
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
|
||||||
aliasWithPorts(ptr.To(policyv2.AutoGroupTagged), tailcfg.PortRangeAny),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
|
|
||||||
2,
|
|
||||||
)
|
|
||||||
defer scenario.ShutdownAssertNoPanics(t)
|
|
||||||
|
|
||||||
allClients, err := scenario.ListTailscaleClients()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
err = scenario.WaitForTailscaleSync()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Test that tagged nodes can access each other
|
|
||||||
for _, client := range allClients {
|
|
||||||
status, err := client.Status()
|
|
||||||
require.NoError(t, err)
|
|
||||||
if status.Self.Tags == nil || status.Self.Tags.Len() == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, peer := range allClients {
|
|
||||||
if client.Hostname() == peer.Hostname() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
status, err := peer.Status()
|
|
||||||
require.NoError(t, err)
|
|
||||||
if status.Self.Tags == nil || status.Self.Tags.Len() == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
fqdn, err := peer.FQDN()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
|
|
||||||
t.Logf("url from %s to %s", client.Hostname(), url)
|
|
||||||
|
|
||||||
result, err := client.Curl(url)
|
|
||||||
assert.Len(t, result, 13)
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
|
@@ -12,13 +12,12 @@ import (
 	tcmp "github.com/google/go-cmp/cmp"
 	"github.com/google/go-cmp/cmp/cmpopts"
 	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
-	policyv2 "github.com/juanfont/headscale/hscontrol/policy/v2"
+	policyv1 "github.com/juanfont/headscale/hscontrol/policy/v1"
 	"github.com/juanfont/headscale/hscontrol/types"
 	"github.com/juanfont/headscale/integration/hsic"
 	"github.com/juanfont/headscale/integration/tsic"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-	"tailscale.com/tailcfg"
 	"golang.org/x/exp/slices"
 )

|
@ -913,15 +912,13 @@ func TestNodeTagCommand(t *testing.T) {
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
func TestNodeAdvertiseTagCommand(t *testing.T) {
|
func TestNodeAdvertiseTagCommand(t *testing.T) {
|
||||||
IntegrationSkip(t)
|
IntegrationSkip(t)
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
name string
|
name string
|
||||||
policy *policyv2.Policy
|
policy *policyv1.ACLPolicy
|
||||||
wantTag bool
|
wantTag bool
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
|
@ -930,60 +927,51 @@ func TestNodeAdvertiseTagCommand(t *testing.T) {
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "with-policy-email",
|
name: "with-policy-email",
|
||||||
policy: &policyv2.Policy{
|
policy: &policyv1.ACLPolicy{
|
||||||
ACLs: []policyv2.ACL{
|
ACLs: []policyv1.ACL{
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Protocol: "tcp",
|
Sources: []string{"*"},
|
||||||
Sources: []policyv2.Alias{wildcard()},
|
Destinations: []string{"*:*"},
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
|
||||||
aliasWithPorts(wildcard(), tailcfg.PortRangeAny),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
TagOwners: policyv2.TagOwners{
|
TagOwners: map[string][]string{
|
||||||
policyv2.Tag("tag:test"): policyv2.Owners{usernameOwner("user1@test.no")},
|
"tag:test": {"user1@test.no"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
wantTag: true,
|
wantTag: true,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "with-policy-username",
|
name: "with-policy-username",
|
||||||
policy: &policyv2.Policy{
|
policy: &policyv1.ACLPolicy{
|
||||||
ACLs: []policyv2.ACL{
|
ACLs: []policyv1.ACL{
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Protocol: "tcp",
|
Sources: []string{"*"},
|
||||||
Sources: []policyv2.Alias{wildcard()},
|
Destinations: []string{"*:*"},
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
|
||||||
aliasWithPorts(wildcard(), tailcfg.PortRangeAny),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
TagOwners: policyv2.TagOwners{
|
TagOwners: map[string][]string{
|
||||||
policyv2.Tag("tag:test"): policyv2.Owners{usernameOwner("user1@")},
|
"tag:test": {"user1@"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
wantTag: true,
|
wantTag: true,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "with-policy-groups",
|
name: "with-policy-groups",
|
||||||
policy: &policyv2.Policy{
|
policy: &policyv1.ACLPolicy{
|
||||||
Groups: policyv2.Groups{
|
Groups: policyv1.Groups{
|
||||||
policyv2.Group("group:admins"): []policyv2.Username{policyv2.Username("user1@")},
|
"group:admins": []string{"user1@"},
|
||||||
},
|
},
|
||||||
ACLs: []policyv2.ACL{
|
ACLs: []policyv1.ACL{
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Protocol: "tcp",
|
Sources: []string{"*"},
|
||||||
Sources: []policyv2.Alias{wildcard()},
|
Destinations: []string{"*:*"},
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
|
||||||
aliasWithPorts(wildcard(), tailcfg.PortRangeAny),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
TagOwners: policyv2.TagOwners{
|
TagOwners: map[string][]string{
|
||||||
policyv2.Tag("tag:test"): policyv2.Owners{groupOwner("group:admins")},
|
"tag:test": {"group:admins"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
wantTag: true,
|
wantTag: true,
|
||||||
|
@ -1758,19 +1746,16 @@ func TestPolicyCommand(t *testing.T) {
|
||||||
headscale, err := scenario.Headscale()
|
headscale, err := scenario.Headscale()
|
||||||
assertNoErr(t, err)
|
assertNoErr(t, err)
|
||||||
|
|
||||||
p := policyv2.Policy{
|
p := policyv1.ACLPolicy{
|
||||||
ACLs: []policyv2.ACL{
|
ACLs: []policyv1.ACL{
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Protocol: "tcp",
|
Sources: []string{"*"},
|
||||||
Sources: []policyv2.Alias{wildcard()},
|
Destinations: []string{"*:*"},
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
|
||||||
aliasWithPorts(wildcard(), tailcfg.PortRangeAny),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
TagOwners: policyv2.TagOwners{
|
TagOwners: map[string][]string{
|
||||||
policyv2.Tag("tag:exists"): policyv2.Owners{usernameOwner("user1@")},
|
"tag:exists": {"user1@"},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1797,7 +1782,7 @@ func TestPolicyCommand(t *testing.T) {
|
||||||
|
|
||||||
// Get the current policy and check
|
// Get the current policy and check
|
||||||
// if it is the same as the one we set.
|
// if it is the same as the one we set.
|
||||||
var output *policyv2.Policy
|
var output *policyv1.ACLPolicy
|
||||||
err = executeAndUnmarshal(
|
err = executeAndUnmarshal(
|
||||||
headscale,
|
headscale,
|
||||||
[]string{
|
[]string{
|
||||||
|
@@ -1840,21 +1825,18 @@ func TestPolicyBrokenConfigCommand(t *testing.T) {
 	headscale, err := scenario.Headscale()
 	assertNoErr(t, err)
 
-	p := policyv2.Policy{
-		ACLs: []policyv2.ACL{
+	p := policyv1.ACLPolicy{
+		ACLs: []policyv1.ACL{
 			{
 				// This is an unknown action, so it will return an error
 				// and the config will not be applied.
 				Action: "unknown-action",
-				Protocol: "tcp",
-				Sources: []policyv2.Alias{wildcard()},
-				Destinations: []policyv2.AliasWithPorts{
-					aliasWithPorts(wildcard(), tailcfg.PortRangeAny),
-				},
+				Sources: []string{"*"},
+				Destinations: []string{"*:*"},
 			},
 		},
-		TagOwners: policyv2.TagOwners{
-			policyv2.Tag("tag:exists"): policyv2.Owners{usernameOwner("user1@")},
+		TagOwners: map[string][]string{
+			"tag:exists": {"user1@"},
 		},
 	}
 
@@ -4,7 +4,7 @@ import (
 	"net/netip"
 
 	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
-	policyv2 "github.com/juanfont/headscale/hscontrol/policy/v2"
+	policyv1 "github.com/juanfont/headscale/hscontrol/policy/v1"
 	"github.com/ory/dockertest/v3"
 )
 
@@ -28,5 +28,5 @@ type ControlServer interface {
 	ApproveRoutes(uint64, []netip.Prefix) (*v1.Node, error)
 	GetCert() []byte
 	GetHostname() string
-	SetPolicy(*policyv2.Policy) error
+	SetPolicy(*policyv1.ACLPolicy) error
 }
@@ -19,7 +19,7 @@ import (
 
 	"github.com/davecgh/go-spew/spew"
 	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
-	policyv2 "github.com/juanfont/headscale/hscontrol/policy/v2"
+	policyv1 "github.com/juanfont/headscale/hscontrol/policy/v1"
 	"github.com/juanfont/headscale/hscontrol/types"
 	"github.com/juanfont/headscale/hscontrol/util"
 	"github.com/juanfont/headscale/integration/dockertestutil"
@@ -65,7 +65,7 @@ type HeadscaleInContainer struct {
 	extraPorts       []string
 	caCerts          [][]byte
 	hostPortBindings map[string][]string
-	aclPolicy        *policyv2.Policy
+	aclPolicy        *policyv1.ACLPolicy
 	env              map[string]string
 	tlsCert          []byte
 	tlsKey           []byte
@@ -80,7 +80,7 @@ type Option = func(c *HeadscaleInContainer)
 
 // WithACLPolicy adds a hscontrol.ACLPolicy policy to the
 // HeadscaleInContainer instance.
-func WithACLPolicy(acl *policyv2.Policy) Option {
+func WithACLPolicy(acl *policyv1.ACLPolicy) Option {
 	return func(hsic *HeadscaleInContainer) {
 		if acl == nil {
 			return
@@ -188,6 +188,13 @@ func WithPostgres() Option {
 	}
 }
 
+// WithPolicyV1 tells the integration test to use the old v1 filter.
+func WithPolicyV1() Option {
+	return func(hsic *HeadscaleInContainer) {
+		hsic.env["HEADSCALE_POLICY_V1"] = "1"
+	}
+}
+
 // WithPolicy sets the policy mode for headscale
 func WithPolicyMode(mode types.PolicyMode) Option {
 	return func(hsic *HeadscaleInContainer) {
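The new WithPolicyV1 option only injects HEADSCALE_POLICY_V1=1 into the container environment, so a single test can force the old filter without setting the env knob globally. A hypothetical fragment; the option names are taken from this diff, while the surrounding scenario/test scaffolding is assumed:

```go
// Hypothetical opt-in from a single test.
headscale, err := scenario.Headscale(
	hsic.WithTestName("policyv1"),
	hsic.WithPolicyV1(), // ends up as HEADSCALE_POLICY_V1=1 in the container env
)
if err != nil {
	t.Fatal(err)
}
_ = headscale
```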
@@ -882,7 +889,7 @@ func (t *HeadscaleInContainer) MapUsers() (map[string]*v1.User, error) {
 	return userMap, nil
 }
 
-func (h *HeadscaleInContainer) SetPolicy(pol *policyv2.Policy) error {
+func (h *HeadscaleInContainer) SetPolicy(pol *policyv1.ACLPolicy) error {
 	err := h.writePolicy(pol)
 	if err != nil {
 		return fmt.Errorf("writing policy file: %w", err)
@@ -923,7 +930,7 @@ func (h *HeadscaleInContainer) reloadDatabasePolicy() error {
 	return nil
 }
 
-func (h *HeadscaleInContainer) writePolicy(pol *policyv2.Policy) error {
+func (h *HeadscaleInContainer) writePolicy(pol *policyv1.ACLPolicy) error {
 	pBytes, err := json.Marshal(pol)
 	if err != nil {
 		return fmt.Errorf("marshalling pol: %w", err)
@ -5,7 +5,6 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/netip"
|
"net/netip"
|
||||||
"sort"
|
"sort"
|
||||||
"strings"
|
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
@ -14,7 +13,7 @@ import (
|
||||||
cmpdiff "github.com/google/go-cmp/cmp"
|
cmpdiff "github.com/google/go-cmp/cmp"
|
||||||
"github.com/google/go-cmp/cmp/cmpopts"
|
"github.com/google/go-cmp/cmp/cmpopts"
|
||||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||||
policyv2 "github.com/juanfont/headscale/hscontrol/policy/v2"
|
policyv1 "github.com/juanfont/headscale/hscontrol/policy/v1"
|
||||||
"github.com/juanfont/headscale/hscontrol/types"
|
"github.com/juanfont/headscale/hscontrol/types"
|
||||||
"github.com/juanfont/headscale/hscontrol/util"
|
"github.com/juanfont/headscale/hscontrol/util"
|
||||||
"github.com/juanfont/headscale/integration/hsic"
|
"github.com/juanfont/headscale/integration/hsic"
|
||||||
|
@ -23,7 +22,6 @@ import (
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
"tailscale.com/ipn/ipnstate"
|
"tailscale.com/ipn/ipnstate"
|
||||||
"tailscale.com/net/tsaddr"
|
"tailscale.com/net/tsaddr"
|
||||||
"tailscale.com/tailcfg"
|
|
||||||
"tailscale.com/types/ipproto"
|
"tailscale.com/types/ipproto"
|
||||||
"tailscale.com/types/views"
|
"tailscale.com/types/views"
|
||||||
"tailscale.com/util/must"
|
"tailscale.com/util/must"
|
||||||
|
@ -795,25 +793,26 @@ func TestSubnetRouteACL(t *testing.T) {
|
||||||
err = scenario.CreateHeadscaleEnv([]tsic.Option{
|
err = scenario.CreateHeadscaleEnv([]tsic.Option{
|
||||||
tsic.WithAcceptRoutes(),
|
tsic.WithAcceptRoutes(),
|
||||||
}, hsic.WithTestName("clienableroute"), hsic.WithACLPolicy(
|
}, hsic.WithTestName("clienableroute"), hsic.WithACLPolicy(
|
||||||
&policyv2.Policy{
|
&policyv1.ACLPolicy{
|
||||||
Groups: policyv2.Groups{
|
Groups: policyv1.Groups{
|
||||||
policyv2.Group("group:admins"): []policyv2.Username{policyv2.Username(user + "@")},
|
"group:admins": {user + "@"},
|
||||||
},
|
},
|
||||||
ACLs: []policyv2.ACL{
|
ACLs: []policyv1.ACL{
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Sources: []policyv2.Alias{groupp("group:admins")},
|
Sources: []string{"group:admins"},
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
Destinations: []string{"group:admins:*"},
|
||||||
aliasWithPorts(groupp("group:admins"), tailcfg.PortRangeAny),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Sources: []policyv2.Alias{groupp("group:admins")},
|
Sources: []string{"group:admins"},
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
Destinations: []string{"10.33.0.0/16:*"},
|
||||||
aliasWithPorts(prefixp("10.33.0.0/16"), tailcfg.PortRangeAny),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
|
// {
|
||||||
|
// Action: "accept",
|
||||||
|
// Sources: []string{"group:admins"},
|
||||||
|
// Destinations: []string{"0.0.0.0/0:*"},
|
||||||
|
// },
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
))
|
))
|
||||||
|
@ -1385,31 +1384,29 @@ func TestAutoApproveMultiNetwork(t *testing.T) {
|
||||||
|
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
name string
|
name string
|
||||||
pol *policyv2.Policy
|
pol *policyv1.ACLPolicy
|
||||||
approver string
|
approver string
|
||||||
spec ScenarioSpec
|
spec ScenarioSpec
|
||||||
withURL bool
|
withURL bool
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
name: "authkey-tag",
|
name: "authkey-tag",
|
||||||
pol: &policyv2.Policy{
|
pol: &policyv1.ACLPolicy{
|
||||||
ACLs: []policyv2.ACL{
|
ACLs: []policyv1.ACL{
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Sources: []policyv2.Alias{wildcard()},
|
Sources: []string{"*"},
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
Destinations: []string{"*:*"},
|
||||||
aliasWithPorts(wildcard(), tailcfg.PortRangeAny),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
TagOwners: policyv2.TagOwners{
|
TagOwners: map[string][]string{
|
||||||
policyv2.Tag("tag:approve"): policyv2.Owners{usernameOwner("user1@")},
|
"tag:approve": {"user1@"},
|
||||||
},
|
},
|
||||||
AutoApprovers: policyv2.AutoApproverPolicy{
|
AutoApprovers: policyv1.AutoApprovers{
|
||||||
Routes: map[netip.Prefix]policyv2.AutoApprovers{
|
Routes: map[string][]string{
|
||||||
bigRoute: {tagApprover("tag:approve")},
|
bigRoute.String(): {"tag:approve"},
|
||||||
},
|
},
|
||||||
ExitNode: policyv2.AutoApprovers{tagApprover("tag:approve")},
|
ExitNode: []string{"tag:approve"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
approver: "tag:approve",
|
approver: "tag:approve",
|
||||||
|
@ -1430,21 +1427,19 @@ func TestAutoApproveMultiNetwork(t *testing.T) {
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "authkey-user",
|
name: "authkey-user",
|
||||||
pol: &policyv2.Policy{
|
pol: &policyv1.ACLPolicy{
|
||||||
ACLs: []policyv2.ACL{
|
ACLs: []policyv1.ACL{
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Sources: []policyv2.Alias{wildcard()},
|
Sources: []string{"*"},
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
Destinations: []string{"*:*"},
|
||||||
aliasWithPorts(wildcard(), tailcfg.PortRangeAny),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
AutoApprovers: policyv2.AutoApproverPolicy{
|
AutoApprovers: policyv1.AutoApprovers{
|
||||||
Routes: map[netip.Prefix]policyv2.AutoApprovers{
|
Routes: map[string][]string{
|
||||||
bigRoute: {usernameApprover("user1@")},
|
bigRoute.String(): {"user1@"},
|
||||||
},
|
},
|
||||||
ExitNode: policyv2.AutoApprovers{usernameApprover("user1@")},
|
ExitNode: []string{"user1@"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
approver: "user1@",
|
approver: "user1@",
|
||||||
|
@ -1465,24 +1460,22 @@ func TestAutoApproveMultiNetwork(t *testing.T) {
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "authkey-group",
|
name: "authkey-group",
|
||||||
pol: &policyv2.Policy{
|
pol: &policyv1.ACLPolicy{
|
||||||
ACLs: []policyv2.ACL{
|
ACLs: []policyv1.ACL{
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Sources: []policyv2.Alias{wildcard()},
|
Sources: []string{"*"},
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
Destinations: []string{"*:*"},
|
||||||
aliasWithPorts(wildcard(), tailcfg.PortRangeAny),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Groups: policyv2.Groups{
|
Groups: policyv1.Groups{
|
||||||
policyv2.Group("group:approve"): []policyv2.Username{policyv2.Username("user1@")},
|
"group:approve": []string{"user1@"},
|
||||||
},
|
},
|
||||||
AutoApprovers: policyv2.AutoApproverPolicy{
|
AutoApprovers: policyv1.AutoApprovers{
|
||||||
Routes: map[netip.Prefix]policyv2.AutoApprovers{
|
Routes: map[string][]string{
|
||||||
bigRoute: {groupApprover("group:approve")},
|
bigRoute.String(): {"group:approve"},
|
||||||
},
|
},
|
||||||
ExitNode: policyv2.AutoApprovers{groupApprover("group:approve")},
|
ExitNode: []string{"group:approve"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
approver: "group:approve",
|
approver: "group:approve",
|
||||||
|
@ -1503,21 +1496,19 @@ func TestAutoApproveMultiNetwork(t *testing.T) {
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "webauth-user",
|
name: "webauth-user",
|
||||||
pol: &policyv2.Policy{
|
pol: &policyv1.ACLPolicy{
|
||||||
ACLs: []policyv2.ACL{
|
ACLs: []policyv1.ACL{
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Sources: []policyv2.Alias{wildcard()},
|
Sources: []string{"*"},
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
Destinations: []string{"*:*"},
|
||||||
aliasWithPorts(wildcard(), tailcfg.PortRangeAny),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
AutoApprovers: policyv2.AutoApproverPolicy{
|
AutoApprovers: policyv1.AutoApprovers{
|
||||||
Routes: map[netip.Prefix]policyv2.AutoApprovers{
|
Routes: map[string][]string{
|
||||||
bigRoute: {usernameApprover("user1@")},
|
bigRoute.String(): {"user1@"},
|
||||||
},
|
},
|
||||||
ExitNode: policyv2.AutoApprovers{usernameApprover("user1@")},
|
ExitNode: []string{"user1@"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
approver: "user1@",
|
approver: "user1@",
|
||||||
|
@ -1539,24 +1530,22 @@ func TestAutoApproveMultiNetwork(t *testing.T) {
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "webauth-tag",
|
name: "webauth-tag",
|
||||||
pol: &policyv2.Policy{
|
pol: &policyv1.ACLPolicy{
|
||||||
ACLs: []policyv2.ACL{
|
ACLs: []policyv1.ACL{
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Sources: []policyv2.Alias{wildcard()},
|
Sources: []string{"*"},
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
Destinations: []string{"*:*"},
|
||||||
aliasWithPorts(wildcard(), tailcfg.PortRangeAny),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
TagOwners: policyv2.TagOwners{
|
TagOwners: map[string][]string{
|
||||||
policyv2.Tag("tag:approve"): policyv2.Owners{usernameOwner("user1@")},
|
"tag:approve": {"user1@"},
|
||||||
},
|
},
|
||||||
AutoApprovers: policyv2.AutoApproverPolicy{
|
AutoApprovers: policyv1.AutoApprovers{
|
||||||
Routes: map[netip.Prefix]policyv2.AutoApprovers{
|
Routes: map[string][]string{
|
||||||
bigRoute: {tagApprover("tag:approve")},
|
bigRoute.String(): {"tag:approve"},
|
||||||
},
|
},
|
||||||
ExitNode: policyv2.AutoApprovers{tagApprover("tag:approve")},
|
ExitNode: []string{"tag:approve"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
approver: "tag:approve",
|
approver: "tag:approve",
|
||||||
|
@ -1578,24 +1567,22 @@ func TestAutoApproveMultiNetwork(t *testing.T) {
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "webauth-group",
|
name: "webauth-group",
|
||||||
pol: &policyv2.Policy{
|
pol: &policyv1.ACLPolicy{
|
||||||
ACLs: []policyv2.ACL{
|
ACLs: []policyv1.ACL{
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Sources: []policyv2.Alias{wildcard()},
|
Sources: []string{"*"},
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
Destinations: []string{"*:*"},
|
||||||
aliasWithPorts(wildcard(), tailcfg.PortRangeAny),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Groups: policyv2.Groups{
|
Groups: policyv1.Groups{
|
||||||
policyv2.Group("group:approve"): []policyv2.Username{policyv2.Username("user1@")},
|
"group:approve": []string{"user1@"},
|
||||||
},
|
},
|
||||||
AutoApprovers: policyv2.AutoApproverPolicy{
|
AutoApprovers: policyv1.AutoApprovers{
|
||||||
Routes: map[netip.Prefix]policyv2.AutoApprovers{
|
Routes: map[string][]string{
|
||||||
bigRoute: {groupApprover("group:approve")},
|
bigRoute.String(): {"group:approve"},
|
||||||
},
|
},
|
||||||
ExitNode: policyv2.AutoApprovers{groupApprover("group:approve")},
|
ExitNode: []string{"group:approve"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
approver: "group:approve",
|
approver: "group:approve",
|
||||||
|
@ -1670,20 +1657,7 @@ func TestAutoApproveMultiNetwork(t *testing.T) {
|
||||||
assert.NotNil(t, headscale)
|
assert.NotNil(t, headscale)
|
||||||
|
|
||||||
// Set the route of usernet1 to be autoapproved
|
// Set the route of usernet1 to be autoapproved
|
||||||
var approvers policyv2.AutoApprovers
|
tt.pol.AutoApprovers.Routes[route.String()] = []string{tt.approver}
|
||||||
switch {
|
|
||||||
case strings.HasPrefix(tt.approver, "tag:"):
|
|
||||||
approvers = append(approvers, tagApprover(tt.approver))
|
|
||||||
case strings.HasPrefix(tt.approver, "group:"):
|
|
||||||
approvers = append(approvers, groupApprover(tt.approver))
|
|
||||||
default:
|
|
||||||
approvers = append(approvers, usernameApprover(tt.approver))
|
|
||||||
}
|
|
||||||
if tt.pol.AutoApprovers.Routes == nil {
|
|
||||||
tt.pol.AutoApprovers.Routes = make(map[netip.Prefix]policyv2.AutoApprovers)
|
|
||||||
}
|
|
||||||
prefix := *route
|
|
||||||
tt.pol.AutoApprovers.Routes[prefix] = approvers
|
|
||||||
err = headscale.SetPolicy(tt.pol)
|
err = headscale.SetPolicy(tt.pol)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
@ -1793,8 +1767,7 @@ func TestAutoApproveMultiNetwork(t *testing.T) {
|
||||||
assertTracerouteViaIP(t, tr, routerUsernet1.MustIPv4())
|
assertTracerouteViaIP(t, tr, routerUsernet1.MustIPv4())
|
||||||
|
|
||||||
// Remove the auto approval from the policy, any routes already enabled should be allowed.
|
// Remove the auto approval from the policy, any routes already enabled should be allowed.
|
||||||
prefix = *route
|
delete(tt.pol.AutoApprovers.Routes, route.String())
|
||||||
delete(tt.pol.AutoApprovers.Routes, prefix)
|
|
||||||
err = headscale.SetPolicy(tt.pol)
|
err = headscale.SetPolicy(tt.pol)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
@ -1858,20 +1831,7 @@ func TestAutoApproveMultiNetwork(t *testing.T) {
|
||||||
|
|
||||||
// Add the route back to the auto approver in the policy, the route should
|
// Add the route back to the auto approver in the policy, the route should
|
||||||
// now become available again.
|
// now become available again.
|
||||||
var newApprovers policyv2.AutoApprovers
|
tt.pol.AutoApprovers.Routes[route.String()] = []string{tt.approver}
|
||||||
switch {
|
|
||||||
case strings.HasPrefix(tt.approver, "tag:"):
|
|
||||||
newApprovers = append(newApprovers, tagApprover(tt.approver))
|
|
||||||
case strings.HasPrefix(tt.approver, "group:"):
|
|
||||||
newApprovers = append(newApprovers, groupApprover(tt.approver))
|
|
||||||
default:
|
|
||||||
newApprovers = append(newApprovers, usernameApprover(tt.approver))
|
|
||||||
}
|
|
||||||
if tt.pol.AutoApprovers.Routes == nil {
|
|
||||||
tt.pol.AutoApprovers.Routes = make(map[netip.Prefix]policyv2.AutoApprovers)
|
|
||||||
}
|
|
||||||
prefix = *route
|
|
||||||
tt.pol.AutoApprovers.Routes[prefix] = newApprovers
|
|
||||||
err = headscale.SetPolicy(tt.pol)
|
err = headscale.SetPolicy(tt.pol)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
@@ -2110,9 +2070,7 @@ func TestSubnetRouteACLFiltering(t *testing.T) {
 			"src": [
 				"node"
 			],
-			"dst": [
-				"*:*"
-			]
+			"dst": []
 		}
 	]
 }`)
@@ -2132,7 +2090,8 @@ func TestSubnetRouteACLFiltering(t *testing.T) {
 	weburl := fmt.Sprintf("http://%s/etc/hostname", webip)
 	t.Logf("webservice: %s, %s", webip.String(), weburl)
 
-	aclPolicy := &policyv2.Policy{}
+	// Create ACL policy
+	aclPolicy := &policyv1.ACLPolicy{}
 	err = json.Unmarshal([]byte(aclPolicyStr), aclPolicy)
 	require.NoError(t, err)
 
@ -2162,23 +2121,24 @@ func TestSubnetRouteACLFiltering(t *testing.T) {
|
||||||
routerClient := allClients[0]
|
routerClient := allClients[0]
|
||||||
nodeClient := allClients[1]
|
nodeClient := allClients[1]
|
||||||
|
|
||||||
aclPolicy.Hosts = policyv2.Hosts{
|
aclPolicy.Hosts = policyv1.Hosts{
|
||||||
policyv2.Host(routerUser): policyv2.Prefix(must.Get(routerClient.MustIPv4().Prefix(32))),
|
routerUser: must.Get(routerClient.MustIPv4().Prefix(32)),
|
||||||
policyv2.Host(nodeUser): policyv2.Prefix(must.Get(nodeClient.MustIPv4().Prefix(32))),
|
nodeUser: must.Get(nodeClient.MustIPv4().Prefix(32)),
|
||||||
}
|
}
|
||||||
aclPolicy.ACLs[1].Destinations = []policyv2.AliasWithPorts{
|
aclPolicy.ACLs[1].Destinations = []string{
|
||||||
aliasWithPorts(prefixp(route.String()), tailcfg.PortRangeAny),
|
route.String() + ":*",
|
||||||
}
|
}
|
||||||
|
|
||||||
require.NoError(t, headscale.SetPolicy(aclPolicy))
|
require.NoError(t, headscale.SetPolicy(aclPolicy))
|
||||||
|
|
||||||
// Set up the subnet routes for the router
|
// Set up the subnet routes for the router
|
||||||
routes := []netip.Prefix{
|
routes := []string{
|
||||||
*route, // This should be accessible by the client
|
route.String(), // This should be accessible by the client
|
||||||
netip.MustParsePrefix("10.10.11.0/24"), // These should NOT be accessible
|
"10.10.11.0/24", // These should NOT be accessible
|
||||||
netip.MustParsePrefix("10.10.12.0/24"),
|
"10.10.12.0/24",
|
||||||
}
|
}
|
||||||
|
|
||||||
routeArg := "--advertise-routes=" + routes[0].String() + "," + routes[1].String() + "," + routes[2].String()
|
routeArg := "--advertise-routes=" + routes[0] + "," + routes[1] + "," + routes[2]
|
||||||
command := []string{
|
command := []string{
|
||||||
"tailscale",
|
"tailscale",
|
||||||
"set",
|
"set",
|
||||||
|
@ -2248,4 +2208,5 @@ func TestSubnetRouteACLFiltering(t *testing.T) {
|
||||||
tr, err := nodeClient.Traceroute(webip)
|
tr, err := nodeClient.Traceroute(webip)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assertTracerouteViaIP(t, tr, routerClient.MustIPv4())
|
assertTracerouteViaIP(t, tr, routerClient.MustIPv4())
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
|
@@ -47,6 +47,7 @@ const (
 )
 
 var usePostgresForTest = envknob.Bool("HEADSCALE_INTEGRATION_POSTGRES")
+var usePolicyV1ForTest = envknob.Bool("HEADSCALE_POLICY_V1")
 
 var (
 	errNoHeadscaleAvailable = errors.New("no headscale available")
@@ -413,6 +414,10 @@ func (s *Scenario) Headscale(opts ...hsic.Option) (ControlServer, error) {
 		opts = append(opts, hsic.WithPostgres())
 	}
 
+	if usePolicyV1ForTest {
+		opts = append(opts, hsic.WithPolicyV1())
+	}
+
 	headscale, err := hsic.New(s.pool, s.Networks(), opts...)
 	if err != nil {
 		return nil, fmt.Errorf("failed to create headscale container: %w", err)
@ -842,7 +847,6 @@ func (s *Scenario) runHeadscaleRegister(userStr string, body string) error {
|
||||||
return errParseAuthPage
|
return errParseAuthPage
|
||||||
}
|
}
|
||||||
key := keySep[1]
|
key := keySep[1]
|
||||||
key = strings.SplitN(key, " ", 2)[0]
|
|
||||||
log.Printf("registering node %s", key)
|
log.Printf("registering node %s", key)
|
||||||
|
|
||||||
if headscale, err := s.Headscale(); err == nil {
|
if headscale, err := s.Headscale(); err == nil {
|
||||||
|
|
|
@ -7,11 +7,10 @@ import (
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
policyv2 "github.com/juanfont/headscale/hscontrol/policy/v2"
|
policyv1 "github.com/juanfont/headscale/hscontrol/policy/v1"
|
||||||
"github.com/juanfont/headscale/integration/hsic"
|
"github.com/juanfont/headscale/integration/hsic"
|
||||||
"github.com/juanfont/headscale/integration/tsic"
|
"github.com/juanfont/headscale/integration/tsic"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"tailscale.com/tailcfg"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func isSSHNoAccessStdError(stderr string) bool {
|
func isSSHNoAccessStdError(stderr string) bool {
|
||||||
|
@@ -49,7 +48,7 @@ var retry = func(times int, sleepInterval time.Duration,
 	return result, stderr, err
 }
 
-func sshScenario(t *testing.T, policy *policyv2.Policy, clientsPerUser int) *Scenario {
+func sshScenario(t *testing.T, policy *policyv1.ACLPolicy, clientsPerUser int) *Scenario {
 	t.Helper()
 
 	spec := ScenarioSpec{
@ -93,26 +92,23 @@ func TestSSHOneUserToAll(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
scenario := sshScenario(t,
|
scenario := sshScenario(t,
|
||||||
&policyv2.Policy{
|
&policyv1.ACLPolicy{
|
||||||
Groups: policyv2.Groups{
|
Groups: map[string][]string{
|
||||||
policyv2.Group("group:integration-test"): []policyv2.Username{policyv2.Username("user1@")},
|
"group:integration-test": {"user1@"},
|
||||||
},
|
},
|
||||||
ACLs: []policyv2.ACL{
|
ACLs: []policyv1.ACL{
|
||||||
{
|
|
||||||
Action: "accept",
|
|
||||||
Protocol: "tcp",
|
|
||||||
Sources: []policyv2.Alias{wildcard()},
|
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
|
||||||
aliasWithPorts(wildcard(), tailcfg.PortRangeAny),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
SSHs: []policyv2.SSH{
|
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Sources: policyv2.SSHSrcAliases{groupp("group:integration-test")},
|
Sources: []string{"*"},
|
||||||
Destinations: policyv2.SSHDstAliases{wildcard()},
|
Destinations: []string{"*:*"},
|
||||||
Users: []policyv2.SSHUser{policyv2.SSHUser("ssh-it-user")},
|
},
|
||||||
|
},
|
||||||
|
SSHs: []policyv1.SSH{
|
||||||
|
{
|
||||||
|
Action: "accept",
|
||||||
|
Sources: []string{"group:integration-test"},
|
||||||
|
Destinations: []string{"*"},
|
||||||
|
Users: []string{"ssh-it-user"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
@ -161,26 +157,23 @@ func TestSSHMultipleUsersAllToAll(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
scenario := sshScenario(t,
|
scenario := sshScenario(t,
|
||||||
&policyv2.Policy{
|
&policyv1.ACLPolicy{
|
||||||
Groups: policyv2.Groups{
|
Groups: map[string][]string{
|
||||||
policyv2.Group("group:integration-test"): []policyv2.Username{policyv2.Username("user1@"), policyv2.Username("user2@")},
|
"group:integration-test": {"user1@", "user2@"},
|
||||||
},
|
},
|
||||||
ACLs: []policyv2.ACL{
|
ACLs: []policyv1.ACL{
|
||||||
{
|
|
||||||
Action: "accept",
|
|
||||||
Protocol: "tcp",
|
|
||||||
Sources: []policyv2.Alias{wildcard()},
|
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
|
||||||
aliasWithPorts(wildcard(), tailcfg.PortRangeAny),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
SSHs: []policyv2.SSH{
|
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Sources: policyv2.SSHSrcAliases{groupp("group:integration-test")},
|
Sources: []string{"*"},
|
||||||
Destinations: policyv2.SSHDstAliases{usernamep("user1@"), usernamep("user2@")},
|
Destinations: []string{"*:*"},
|
||||||
Users: []policyv2.SSHUser{policyv2.SSHUser("ssh-it-user")},
|
},
|
||||||
|
},
|
||||||
|
SSHs: []policyv1.SSH{
|
||||||
|
{
|
||||||
|
Action: "accept",
|
||||||
|
Sources: []string{"group:integration-test"},
|
||||||
|
Destinations: []string{"user1@", "user2@"},
|
||||||
|
Users: []string{"ssh-it-user"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
@ -217,21 +210,18 @@ func TestSSHNoSSHConfigured(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
scenario := sshScenario(t,
|
scenario := sshScenario(t,
|
||||||
&policyv2.Policy{
|
&policyv1.ACLPolicy{
|
||||||
Groups: policyv2.Groups{
|
Groups: map[string][]string{
|
||||||
policyv2.Group("group:integration-test"): []policyv2.Username{policyv2.Username("user1@")},
|
"group:integration-test": {"user1@"},
|
||||||
},
|
},
|
||||||
ACLs: []policyv2.ACL{
|
ACLs: []policyv1.ACL{
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Protocol: "tcp",
|
Sources: []string{"*"},
|
||||||
Sources: []policyv2.Alias{wildcard()},
|
Destinations: []string{"*:*"},
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
|
||||||
aliasWithPorts(wildcard(), tailcfg.PortRangeAny),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
SSHs: []policyv2.SSH{},
|
SSHs: []policyv1.SSH{},
|
||||||
},
|
},
|
||||||
len(MustTestVersions),
|
len(MustTestVersions),
|
||||||
)
|
)
|
||||||
|
@ -262,26 +252,23 @@ func TestSSHIsBlockedInACL(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
scenario := sshScenario(t,
|
scenario := sshScenario(t,
|
||||||
&policyv2.Policy{
|
&policyv1.ACLPolicy{
|
||||||
Groups: policyv2.Groups{
|
Groups: map[string][]string{
|
||||||
policyv2.Group("group:integration-test"): []policyv2.Username{policyv2.Username("user1@")},
|
"group:integration-test": {"user1@"},
|
||||||
},
|
},
|
||||||
ACLs: []policyv2.ACL{
|
ACLs: []policyv1.ACL{
|
||||||
{
|
|
||||||
Action: "accept",
|
|
||||||
Protocol: "tcp",
|
|
||||||
Sources: []policyv2.Alias{wildcard()},
|
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
|
||||||
aliasWithPorts(wildcard(), tailcfg.PortRange{First: 80, Last: 80}),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
SSHs: []policyv2.SSH{
|
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Sources: policyv2.SSHSrcAliases{groupp("group:integration-test")},
|
Sources: []string{"*"},
|
||||||
Destinations: policyv2.SSHDstAliases{usernamep("user1@")},
|
Destinations: []string{"*:80"},
|
||||||
Users: []policyv2.SSHUser{policyv2.SSHUser("ssh-it-user")},
|
},
|
||||||
|
},
|
||||||
|
SSHs: []policyv1.SSH{
|
||||||
|
{
|
||||||
|
Action: "accept",
|
||||||
|
Sources: []string{"group:integration-test"},
|
||||||
|
Destinations: []string{"user1@"},
|
||||||
|
Users: []string{"ssh-it-user"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
@ -314,33 +301,30 @@ func TestSSHUserOnlyIsolation(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
scenario := sshScenario(t,
|
scenario := sshScenario(t,
|
||||||
&policyv2.Policy{
|
&policyv1.ACLPolicy{
|
||||||
Groups: policyv2.Groups{
|
Groups: map[string][]string{
|
||||||
policyv2.Group("group:ssh1"): []policyv2.Username{policyv2.Username("user1@")},
|
"group:ssh1": {"user1@"},
|
||||||
policyv2.Group("group:ssh2"): []policyv2.Username{policyv2.Username("user2@")},
|
"group:ssh2": {"user2@"},
|
||||||
},
|
},
|
||||||
ACLs: []policyv2.ACL{
|
ACLs: []policyv1.ACL{
|
||||||
{
|
|
||||||
Action: "accept",
|
|
||||||
Protocol: "tcp",
|
|
||||||
Sources: []policyv2.Alias{wildcard()},
|
|
||||||
Destinations: []policyv2.AliasWithPorts{
|
|
||||||
aliasWithPorts(wildcard(), tailcfg.PortRangeAny),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
SSHs: []policyv2.SSH{
|
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Sources: policyv2.SSHSrcAliases{groupp("group:ssh1")},
|
Sources: []string{"*"},
|
||||||
Destinations: policyv2.SSHDstAliases{usernamep("user1@")},
|
Destinations: []string{"*:*"},
|
||||||
Users: []policyv2.SSHUser{policyv2.SSHUser("ssh-it-user")},
|
},
|
||||||
|
},
|
||||||
|
SSHs: []policyv1.SSH{
|
||||||
|
{
|
||||||
|
Action: "accept",
|
||||||
|
Sources: []string{"group:ssh1"},
|
||||||
|
Destinations: []string{"user1@"},
|
||||||
|
Users: []string{"ssh-it-user"},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Action: "accept",
|
Action: "accept",
|
||||||
Sources: policyv2.SSHSrcAliases{groupp("group:ssh2")},
|
Sources: []string{"group:ssh2"},
|
||||||
Destinations: policyv2.SSHDstAliases{usernamep("user2@")},
|
Destinations: []string{"user2@"},
|
||||||
Users: []policyv2.SSHUser{policyv2.SSHUser("ssh-it-user")},
|
Users: []string{"ssh-it-user"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
|
|
@@ -5,19 +5,15 @@ import (
 	"bytes"
 	"fmt"
 	"io"
-	"net/netip"
 	"strings"
 	"sync"
 	"testing"
 	"time"
 
 	"github.com/cenkalti/backoff/v4"
-	policyv2 "github.com/juanfont/headscale/hscontrol/policy/v2"
 	"github.com/juanfont/headscale/hscontrol/util"
 	"github.com/juanfont/headscale/integration/tsic"
 	"github.com/stretchr/testify/assert"
-	"tailscale.com/tailcfg"
-	"tailscale.com/types/ptr"
 )
 
 const (
@@ -423,76 +419,10 @@ func countMatchingLines(in io.Reader, predicate func(string) bool) (int, error)
 // 			return peer
 // 		}
 // 	}
 // }
 //
 // 	return nil
 // }
-
-// Helper functions for creating typed policy entities
-
-// wildcard returns a wildcard alias (*).
-func wildcard() policyv2.Alias {
-	return policyv2.Wildcard
-}
-
-// usernamep returns a pointer to a Username as an Alias.
-func usernamep(name string) policyv2.Alias {
-	return ptr.To(policyv2.Username(name))
-}
-
-// hostp returns a pointer to a Host.
-func hostp(name string) policyv2.Alias {
-	return ptr.To(policyv2.Host(name))
-}
-
-// groupp returns a pointer to a Group as an Alias.
-func groupp(name string) policyv2.Alias {
-	return ptr.To(policyv2.Group(name))
-}
-
-// tagp returns a pointer to a Tag as an Alias.
-func tagp(name string) policyv2.Alias {
-	return ptr.To(policyv2.Tag(name))
-}
-
-// prefixp returns a pointer to a Prefix from a CIDR string.
-func prefixp(cidr string) policyv2.Alias {
-	prefix := netip.MustParsePrefix(cidr)
-	return ptr.To(policyv2.Prefix(prefix))
-}
-
-// aliasWithPorts creates an AliasWithPorts structure from an alias and ports.
-func aliasWithPorts(alias policyv2.Alias, ports ...tailcfg.PortRange) policyv2.AliasWithPorts {
-	return policyv2.AliasWithPorts{
-		Alias: alias,
-		Ports: ports,
-	}
-}
-
-// usernameOwner returns a Username as an Owner for use in TagOwners.
-func usernameOwner(name string) policyv2.Owner {
-	return ptr.To(policyv2.Username(name))
-}
-
-// groupOwner returns a Group as an Owner for use in TagOwners.
-func groupOwner(name string) policyv2.Owner {
-	return ptr.To(policyv2.Group(name))
-}
-
-// usernameApprover returns a Username as an AutoApprover.
-func usernameApprover(name string) policyv2.AutoApprover {
-	return ptr.To(policyv2.Username(name))
-}
-
-// groupApprover returns a Group as an AutoApprover.
-func groupApprover(name string) policyv2.AutoApprover {
-	return ptr.To(policyv2.Group(name))
-}
-
-// tagApprover returns a Tag as an AutoApprover.
-func tagApprover(name string) policyv2.AutoApprover {
-	return ptr.To(policyv2.Tag(name))
-}
 //
 // // findPeerByHostname takes a hostname and a map of peers from status.Peer, and returns a *ipnstate.PeerStatus
 // // if there is a peer with the given hostname. If no peer is found, nil is returned.
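The helpers removed above are thin wrappers over the v2 policy types. A sketch of how they compose into a typed policy like the auto-approver test cases earlier in this diff; it assumes the helper definitions from the hunk above are in scope together with the policyv2, netip and tailcfg imports they rely on:

```go
// examplePolicy mirrors the "authkey-tag"-style test policy using the helpers.
func examplePolicy(route netip.Prefix) policyv2.Policy {
	return policyv2.Policy{
		ACLs: []policyv2.ACL{{
			Action:       "accept",
			Sources:      []policyv2.Alias{wildcard()},
			Destinations: []policyv2.AliasWithPorts{aliasWithPorts(wildcard(), tailcfg.PortRangeAny)},
		}},
		TagOwners: policyv2.TagOwners{
			policyv2.Tag("tag:approve"): policyv2.Owners{usernameOwner("user1@")},
		},
		AutoApprovers: policyv2.AutoApproverPolicy{
			Routes:   map[netip.Prefix]policyv2.AutoApprovers{route: {tagApprover("tag:approve")}},
			ExitNode: policyv2.AutoApprovers{tagApprover("tag:approve")},
		},
	}
}
```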
|
@ -58,6 +58,9 @@ theme:
|
||||||
|
|
||||||
# Excludes
|
# Excludes
|
||||||
exclude_docs: |
|
exclude_docs: |
|
||||||
|
/packaging/README.md
|
||||||
|
/packaging/postinstall.sh
|
||||||
|
/packaging/postremove.sh
|
||||||
/requirements.txt
|
/requirements.txt
|
||||||
|
|
||||||
# Plugins
|
# Plugins
|
||||||
|
|
|
@@ -1,5 +0,0 @@
-# Packaging
-
-We use [nFPM](https://nfpm.goreleaser.com/) for making `.deb` packages.
-
-This folder contains files we need to package with these releases.
@ -1,87 +0,0 @@
|
||||||
#!/bin/sh
|
|
||||||
# postinst script for headscale.
|
|
||||||
|
|
||||||
set -e
|
|
||||||
|
|
||||||
# Summary of how this script can be called:
|
|
||||||
# * <postinst> 'configure' <most-recently-configured-version>
|
|
||||||
# * <old-postinst> 'abort-upgrade' <new version>
|
|
||||||
# * <conflictor's-postinst> 'abort-remove' 'in-favour' <package>
|
|
||||||
# <new-version>
|
|
||||||
# * <postinst> 'abort-remove'
|
|
||||||
# * <deconfigured's-postinst> 'abort-deconfigure' 'in-favour'
|
|
||||||
# <failed-install-package> <version> 'removing'
|
|
||||||
# <conflicting-package> <version>
|
|
||||||
# for details, see https://www.debian.org/doc/debian-policy/ or
|
|
||||||
# the debian-policy package.
|
|
||||||
|
|
||||||
HEADSCALE_USER="headscale"
|
|
||||||
HEADSCALE_GROUP="headscale"
|
|
||||||
HEADSCALE_HOME_DIR="/var/lib/headscale"
|
|
||||||
HEADSCALE_SHELL="/usr/sbin/nologin"
|
|
||||||
HEADSCALE_SERVICE="headscale.service"
|
|
||||||
|
|
||||||
case "$1" in
|
|
||||||
configure)
|
|
||||||
groupadd --force --system "$HEADSCALE_GROUP"
|
|
||||||
if ! id -u "$HEADSCALE_USER" >/dev/null 2>&1; then
|
|
||||||
useradd --system --shell "$HEADSCALE_SHELL" \
|
|
||||||
--gid "$HEADSCALE_GROUP" --home-dir "$HEADSCALE_HOME_DIR" \
|
|
||||||
--comment "headscale default user" "$HEADSCALE_USER"
|
|
||||||
fi
|
|
||||||
|
|
||||||
if dpkg --compare-versions "$2" lt-nl "0.27"; then
|
|
||||||
# < 0.24.0-beta.1 used /home/headscale as home and /bin/sh as shell.
|
|
||||||
# The directory /home/headscale was not created by the package or
|
|
||||||
# useradd but the service always used /var/lib/headscale which was
|
|
||||||
# always shipped by the package as empty directory. Previous versions
|
|
||||||
# of the package did not update the user account properties.
|
|
||||||
usermod --home "$HEADSCALE_HOME_DIR" --shell "$HEADSCALE_SHELL" \
|
|
||||||
"$HEADSCALE_USER" >/dev/null
|
|
||||||
fi
|
|
||||||
|
|
||||||
if dpkg --compare-versions "$2" lt-nl "0.27" \
|
|
||||||
&& [ $(id --user "$HEADSCALE_USER") -ge 1000 ] \
|
|
||||||
&& [ $(id --group "$HEADSCALE_GROUP") -ge 1000 ]; then
|
|
||||||
# < 0.26.0-beta.1 created a regular user/group to run headscale.
|
|
||||||
# Previous versions of the package did not migrate to system uid/gid.
|
|
||||||
# Assume that the *default* uid/gid range is in use and only run this
|
|
||||||
# migration when the current uid/gid is allocated in the user range.
|
|
||||||
# Create a temporary system user/group to guarantee the allocation of a
|
|
||||||
# uid/gid in the system range. Assign this new uid/gid to the existing
|
|
||||||
# user and group and remove the temporary user/group afterwards.
|
|
||||||
tmp_name="headscaletmp"
|
|
||||||
useradd --system --no-log-init --no-create-home --shell "$HEADSCALE_SHELL" "$tmp_name"
|
|
||||||
tmp_uid="$(id --user "$tmp_name")"
|
|
||||||
tmp_gid="$(id --group "$tmp_name")"
|
|
||||||
usermod --non-unique --uid "$tmp_uid" --gid "$tmp_gid" "$HEADSCALE_USER"
|
|
||||||
groupmod --non-unique --gid "$tmp_gid" "$HEADSCALE_USER"
|
|
||||||
userdel --force "$tmp_name"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Enable service and keep track of its state
|
|
||||||
if deb-systemd-helper --quiet was-enabled "$HEADSCALE_SERVICE"; then
|
|
||||||
deb-systemd-helper enable "$HEADSCALE_SERVICE" >/dev/null || true
|
|
||||||
else
|
|
||||||
deb-systemd-helper update-state "$HEADSCALE_SERVICE" >/dev/null || true
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Bounce service
|
|
||||||
if [ -d /run/systemd/system ]; then
|
|
||||||
systemctl --system daemon-reload >/dev/null || true
|
|
||||||
if [ -n "$2" ]; then
|
|
||||||
deb-systemd-invoke restart "$HEADSCALE_SERVICE" >/dev/null || true
|
|
||||||
else
|
|
||||||
deb-systemd-invoke start "$HEADSCALE_SERVICE" >/dev/null || true
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
;;
|
|
||||||
|
|
||||||
abort-upgrade|abort-remove|abort-deconfigure)
|
|
||||||
;;
|
|
||||||
|
|
||||||
*)
|
|
||||||
echo "postinst called with unknown argument '$1'" >&2
|
|
||||||
exit 1
|
|
||||||
;;
|
|
||||||
esac
|
|
|
@ -1,42 +0,0 @@
|
||||||
#!/bin/sh
|
|
||||||
# postrm script for headscale.
|
|
||||||
|
|
||||||
set -e
|
|
||||||
|
|
||||||
# Summary of how this script can be called:
|
|
||||||
# * <postrm> 'remove'
|
|
||||||
# * <postrm> 'purge'
|
|
||||||
# * <old-postrm> 'upgrade' <new-version>
|
|
||||||
# * <new-postrm> 'failed-upgrade' <old-version>
|
|
||||||
# * <new-postrm> 'abort-install'
|
|
||||||
# * <new-postrm> 'abort-install' <old-version>
|
|
||||||
# * <new-postrm> 'abort-upgrade' <old-version>
|
|
||||||
# * <disappearer's-postrm> 'disappear' <overwriter>
|
|
||||||
# <overwriter-version>
|
|
||||||
# for details, see https://www.debian.org/doc/debian-policy/ or
|
|
||||||
# the debian-policy package.
|
|
||||||
|
|
||||||
|
|
||||||
case "$1" in
|
|
||||||
remove)
|
|
||||||
if [ -d /run/systemd/system ]; then
|
|
||||||
systemctl --system daemon-reload >/dev/null || true
|
|
||||||
fi
|
|
||||||
;;
|
|
||||||
|
|
||||||
purge)
|
|
||||||
userdel headscale
|
|
||||||
rm -rf /var/lib/headscale
|
|
||||||
if [ -x "/usr/bin/deb-systemd-helper" ]; then
|
|
||||||
deb-systemd-helper purge headscale.service >/dev/null || true
|
|
||||||
fi
|
|
||||||
;;
|
|
||||||
|
|
||||||
upgrade|failed-upgrade|abort-install|abort-upgrade|disappear)
|
|
||||||
;;
|
|
||||||
|
|
||||||
*)
|
|
||||||
echo "postrm called with unknown argument '$1'" >&2
|
|
||||||
exit 1
|
|
||||||
;;
|
|
||||||
esac
|
|
|
@ -1,34 +0,0 @@
|
||||||
#!/bin/sh
|
|
||||||
# prerm script for headscale.
|
|
||||||
|
|
||||||
set -e
|
|
||||||
|
|
||||||
# Summary of how this script can be called:
|
|
||||||
# * <prerm> 'remove'
|
|
||||||
# * <old-prerm> 'upgrade' <new-version>
|
|
||||||
# * <new-prerm> 'failed-upgrade' <old-version>
|
|
||||||
# * <conflictor's-prerm> 'remove' 'in-favour' <package> <new-version>
|
|
||||||
# * <deconfigured's-prerm> 'deconfigure' 'in-favour'
|
|
||||||
# <package-being-installed> <version> 'removing'
|
|
||||||
# <conflicting-package> <version>
|
|
||||||
# for details, see https://www.debian.org/doc/debian-policy/ or
|
|
||||||
# the debian-policy package.
|
|
||||||
|
|
||||||
|
|
||||||
case "$1" in
|
|
||||||
remove)
|
|
||||||
if [ -d /run/systemd/system ]; then
|
|
||||||
deb-systemd-invoke stop headscale.service >/dev/null || true
|
|
||||||
fi
|
|
||||||
;;
|
|
||||||
upgrade|deconfigure)
|
|
||||||
;;
|
|
||||||
|
|
||||||
failed-upgrade)
|
|
||||||
;;
|
|
||||||
|
|
||||||
*)
|
|
||||||
echo "prerm called with unknown argument '$1'" >&2
|
|
||||||
exit 1
|
|
||||||
;;
|
|
||||||
esac
|
|